/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe; register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots as they become available.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
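/* For orientation only, the list-scheduling loop described above can be
   pictured roughly as follows (a simplified pseudo-C sketch, not the
   data structures actually used in this file):

     while (there are unscheduled insns)
       {
         queue_to_ready ();                  move insns whose stall expired
         ready_sort (&ready);                order by rank_for_schedule
         insn = ready_remove_first (&ready);
         emit insn and mark it scheduled;
         for each forward dependence of insn:
           if the consumer's last dependence was just resolved,
             add it to the ready list (zero latency) or to the queue;
         advance the clock when nothing more can issue this cycle;
       }
*/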
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "function.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;
/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx note_list;
static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has elapsed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than maximal time of instruction execution computed by genattr.c on
   the base maximal time of functional unit reservations and getting a
   result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
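/* A small worked example (illustration only): if max_insn_queue_index
   is 7 and the current queue head is q_ptr == 6, then an insn delayed
   for 3 cycles lands in slot NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1;
   the index simply wraps around the ring buffer.  QUEUE_INDEX of that
   insn is set to the slot number until queue_to_ready moves it to the
   ready list.  */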
#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;
/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is essential one if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static VEC(rtx, heap) *scheduled_insns;
static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
VEC (int, heap) *sched_luids = NULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
VEC (haifa_insn_data_def, heap) *h_i_d = NULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */
static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx i1, i2;
  int cycles;
};

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static htab_t delay_htab;
static htab_t delay_htab_i2;
/* Returns a hash value for X (which really is a delay_pair), based on
   hashing just I1.  */
static hashval_t
delay_hash_i1 (const void *x)
{
  return htab_hash_pointer (((const struct delay_pair *) x)->i1);
}

/* Returns a hash value for X (which really is a delay_pair), based on
   hashing just I2.  */
static hashval_t
delay_hash_i2 (const void *x)
{
  return htab_hash_pointer (((const struct delay_pair *) x)->i2);
}

/* Return nonzero if I1 of pair X is the same as that of pair Y.  */
static int
delay_i1_eq (const void *x, const void *y)
{
  return ((const struct delay_pair *) x)->i1 == y;
}

/* Return nonzero if I2 of pair X is the same as that of pair Y.  */
static int
delay_i2_eq (const void *x, const void *y)
{
  return ((const struct delay_pair *) x)->i2 == y;
}
/* This function can be called by a port just before it starts the
   final scheduling pass.  It records the fact that an instruction
   with delay slots has been split into two insns, I1 and I2.  The
   first one will be scheduled normally and initiates the operation.
   The second one is a shadow which must follow a specific number of
   CYCLES after I1; its only purpose is to show the side effect that
   occurs at that cycle in the RTL.  If a JUMP_INSN or a CALL_INSN has
   been split, I1 should be a normal INSN, while I2 retains the
   original insn type.  */

void
record_delay_slot_pair (rtx i1, rtx i2, int cycles)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;

  if (!delay_htab)
    {
      delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
      delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
    }
  slot = ((struct delay_pair **)
	  htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
				    INSERT));
  p->next_same_i1 = *slot;
  *slot = p;
  slot = ((struct delay_pair **)
	  htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
				    INSERT));
  *slot = p;
}
/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  return p->cycles;
}

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair
    = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
						htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
						    htab_hash_pointer (pro));
      if (!other_pair)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}
/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int, const char *);
static int schedule_insn (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static void ready_add (struct ready_list *, rtx, bool);
static rtx ready_remove_first (struct ready_list *);
static rtx ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);

#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
bool sched_pressure_p;
/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only elements corresponding to pressure
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous array.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it is only clearing registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* Update current register pressure related info after birth (if
   BIRTH_P) or death of register REGNO.  */
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      bitmap_set_bit (curr_reg_live, regno);
	      curr_reg_pressure[pressure_class]
		+= (ira_reg_class_max_nregs
		    [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      bitmap_clear_bit (curr_reg_live, regno);
	      curr_reg_pressure[pressure_class]
		-= (ira_reg_class_max_nregs
		    [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  bitmap_set_bit (curr_reg_live, regno);
	  curr_reg_pressure[pressure_class]++;
	}
      else
	{
	  bitmap_clear_bit (curr_reg_live, regno);
	  curr_reg_pressure[pressure_class]--;
	}
    }
}
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (j, true);
}
/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (HARD_REGISTER_NUM_P (regno))
	bitmap_set_range (region_ref_regs, regno,
			  hard_regno_nregs[regno][GET_MODE (x)]);
      else
	bitmap_set_bit (region_ref_regs, REGNO (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}
/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (regno, true);
      }
#endif
}
/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}
/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}
/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
    }
  fprintf (sched_dump, "\n");
}
/* Pointer to the last instruction scheduled.  */
static rtx last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx nonscheduled_insns_begin;

/* Cached cost of the instruction.  Use below function to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)
/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx insn = DEP_PRO (link);
  rtx used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
						    htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

	  /* Make it self-cycled, so that if some tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}
/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}
/* Compute the number of nondebug forward deps of an insn.  */
static int
dep_list_size (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, SD_LIST_FORW);

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));

  return nodbgcount;
}
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (dep_list_size (insn) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);

	  if (this_priority < 0)
	    {
	      gcc_assert (this_priority == -1);

	      this_priority = insn_cost (insn);
	    }
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
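/* For illustration: ready_sort below applies this macro to the tail of
   the ready vector as
     SCHED_SORT (ready_lastpos (ready), ready->n_ready);
   so the comparison function rank_for_schedule defined further down
   determines the issue order.  */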
/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  enum machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  struct reg_use_data *use;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      {
	cl = sched_regno_pressure_class[use->regno];
	if (use->regno < FIRST_PSEUDO_REGISTER)
	  death[cl]++;
	else
	  death[cl]
	    += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
      }
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - ira_available_class_regs[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val;

  if (MAY_HAVE_DEBUG_INSNS)
    {
      /* Schedule debug insns as early as possible.  */
      if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
	return -1;
      else if (DEBUG_INSN_P (tmp2))
	return 1;
    }

  /* The insn in a schedule group should be issued the first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_pressure_p)
    {
      int diff;

      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + (INSN_TICK (tmp) > clock_var
		      ? INSN_TICK (tmp) - clock_var : 0)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - (INSN_TICK (tmp2) > clock_var
		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
	return diff;
    }

  if (sched_pressure_p
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
    {
      if (INSN_TICK (tmp) <= clock_var)
	return -1;
      else if (INSN_TICK (tmp2) <= clock_var)
	return 1;
      else
	return INSN_TICK (tmp) - INSN_TICK (tmp2);
    }

  /* If we are doing backtracking in this schedule, prefer insns that
     have forward dependencies with negative cost against an insn that
     was already scheduled.  */
  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
      if (priority_val)
	return priority_val;
    }

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return info_val;

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
    {
      dep_t dep1;
      dep_t dep2;
      rtx last = last_nondebug_scheduled_insn;

      /* Classify the instructions into three classes:
         1) Data dependent on last schedule insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2) - dep_list_size (tmp));

  if (flag_sched_dep_count_heuristic && val != 0)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */
HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  REASON will be printed in debugging
   output.  */
HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles, const char *reason)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
  int new_tick;

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
    }

  QUEUE_INDEX (insn) = next_q;

  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      new_tick = clock_var + n_cycles;
      if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
	INSN_TICK (insn) = new_tick;

      if (INSN_EXACT_TICK (insn) != INVALID_TICK
	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
	{
	  must_backtrack = true;
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
	}
    }
}
1553 queue_remove (rtx insn
)
1555 gcc_assert (QUEUE_INDEX (insn
) >= 0);
1556 remove_free_INSN_LIST_elem (insn
, &insn_queue
[QUEUE_INDEX (insn
)]);
1558 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */
rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */
HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;

  if (INSN_EXACT_TICK (insn) != INVALID_TICK
      && INSN_EXACT_TICK (insn) < clock_var)
    {
      must_backtrack = true;
    }
}
/* Remove the element with the highest priority from the ready list and
   return it.  */
HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose ready insn which
   permits to start maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   the highest index.  */

rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has the highest index.  */
HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */
void
ready_sort (struct ready_list *ready)
{
  int i;
  rtx *first = ready_lastpos (ready);

  if (sched_pressure_p)
    {
      for (i = 0; i < ready->n_ready; i++)
	if (!DEBUG_INSN_P (first[i]))
	  setup_insn_reg_pressure_info (first[i]);
    }
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */
HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 6)
    fprintf (sched_dump, ";;\tAdvanced a state.\n");
}
/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
      mark_regno_birth_or_death (use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (set->regno, true);
}
/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_pressure_classes_num; i++)
    max_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_pressure_classes_num; i++)
	  {
	    p = max_reg_pressure[ira_pressure_classes[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_pressure_classes[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_pressure_classes_num; i++)
	  if (max_reg_pressure[ira_pressure_classes[i]]
	      < curr_reg_pressure[ira_pressure_classes[i]])
	    max_reg_pressure[ira_pressure_classes[i]]
	      = curr_reg_pressure[ira_pressure_classes[i]];
      }
  restore_reg_pressure ();
}
/* Update the current register pressure after scheduling INSN.  Update
   also max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_pressure_classes_num; i++)
    before[i] = curr_reg_pressure[ira_pressure_classes[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_pressure_classes_num; i++)
    if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
      break;
  if (i < ira_pressure_classes_num)
    setup_insn_max_reg_pressure (insn, true);
}
/* Set up register pressure at the beginning of basic block BB whose
   insns start after insn AFTER.  Set up also max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
{
  gcc_assert (sched_pressure_p);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}
/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */
1870 schedule_insn (rtx insn
)
1872 sd_iterator_def sd_it
;
1877 if (sched_verbose
>= 1)
1879 struct reg_pressure_data
	*pressure_info;

      print_insn (buf, insn, 0);
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
	{
	  fputc (':', sched_dump);
	  for (i = 0; i < ira_pressure_classes_num; i++)
	    fprintf (sched_dump, "%s%+d(%d)",
		     reg_class_names[ira_pressure_classes[i]],
		     pressure_info[i].set_increase, pressure_info[i].change);
	}
      fputc ('\n', sched_dump);
    }

  if (sched_pressure_p && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	 sd_iterator_cond (&sd_it, &dep);)
      {
	rtx dbg = DEP_PRO (dep);
	struct reg_use_data *use, *next;

	gcc_assert (DEBUG_INSN_P (dbg));

	if (sched_verbose >= 6)
	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
		   INSN_UID (dbg));

	/* ??? Rather than resetting the debug insn, we might be able
	   to emit a debug temp before the just-scheduled insn, but
	   this would involve checking that the expression at the
	   point of the debug insn is equivalent to the expression
	   before the just-scheduled insn.  They might not be: the
	   expression in the debug insn may depend on other insns not
	   yet scheduled that set MEMs, REGs or even other debug
	   insns.  It's not clear that attempting to preserve debug
	   information in these cases is worth the effort, given how
	   uncommon these resets are and the likelihood that the debug
	   temps introduced won't survive the schedule change.  */
	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
	df_insn_rescan (dbg);

	/* Unknown location doesn't use any registers.  */
	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
	  {
	    struct reg_use_data *prev = use;

	    /* Remove use from the cyclic next_regno_use chain first.  */
	    while (prev->next_regno_use != use)
	      prev = prev->next_regno_use;
	    prev->next_regno_use = use->next_regno_use;
	    next = use->next_insn_use;
	  }
	INSN_REG_USE_LIST (dbg) = NULL;

	/* We delete rather than resolve these deps, otherwise we
	   crash in sched_free_deps(), because forward deps are
	   expected to be released before backward deps.  */
	sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);

      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves current dep to another list thus
	 advancing the iterator.  */
      sd_resolve_dep (sd_it);

      /* Don't bother trying to mark next as ready if insn is a debug
	 insn.  If insn is the last hard dependency, it will have
	 already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
	continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);

      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
  rtx from_start;

  /* It's easy when have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  PREV_INSN (from_start) = *to_endp;
  NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx head, rtx tail)
{
  rtx next_tail, insn, next;

  note_list = NULL_RTX;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
	continue;

      switch (NOTE_KIND (insn))
	{
	case NOTE_INSN_BASIC_BLOCK:
	  continue;

	case NOTE_INSN_EPILOGUE_BEG:
	  if (insn != tail)
	    {
	      remove_insn (insn);
	      add_reg_note (next, REG_SAVE_NOTE,
			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
	      break;
	    }
	  /* FALLTHRU */

	default:
	  remove_insn (insn);

	  /* Add the note to list that ends at NOTE_LIST.  */
	  PREV_INSN (insn) = note_list;
	  NEXT_INSN (insn) = NULL_RTX;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	  break;
	}

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}
/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx last_scheduled_insn;
  rtx last_nondebug_scheduled_insn;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx *insn_queue;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}

/* Make a copy of the INSN_LIST list LINK and return it.  */
static rtx
copy_insn_list (rtx link)
{
  rtx new_queue;
  rtx *pqueue = &new_queue;

  for (; link; link = XEXP (link, 1))
    {
      rtx x = XEXP (link, 0);
      rtx newlink = alloc_INSN_LIST (x, NULL);
      *pqueue = newlink;
      pqueue = &XEXP (newlink, 1);
    }
  *pqueue = NULL_RTX;
  return new_queue;
}
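/* Illustrative note (not part of the original sources): copy_insn_list is
   used below by save_backtrack_point to snapshot each bucket of the insn
   queue, as in

       save->insn_queue[i] = copy_insn_list (insn_queue[q]);

   so that the queue contents can be restored verbatim if the scheduler has
   to backtrack.  */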
/* Save the current scheduler state so that we can backtrack to it
   later if necessary.  PAIR gives the insns that make it necessary to
   save this point.  SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
static void
save_backtrack_point (struct delay_pair *pair,
		      struct sched_block_state sched_block)
{
  int i;
  struct haifa_saved_data *save = XNEW (struct haifa_saved_data);

  save->curr_state = xmalloc (dfa_state_size);
  memcpy (save->curr_state, curr_state, dfa_state_size);

  save->ready.first = ready.first;
  save->ready.n_ready = ready.n_ready;
  save->ready.n_debug = ready.n_debug;
  save->ready.veclen = ready.veclen;
  save->ready.vec = XNEWVEC (rtx, ready.veclen);
  memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));

  save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
  save->q_size = q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      save->insn_queue[i] = copy_insn_list (insn_queue[q]);
    }

  save->clock_var = clock_var;
  save->last_clock_var = last_clock_var;
  save->cycle_issued_insns = cycle_issued_insns;
  save->last_scheduled_insn = last_scheduled_insn;
  save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;

  save->sched_block = sched_block;

  if (current_sched_info->save_state)
    save->fe_saved_data = (*current_sched_info->save_state) ();

  if (targetm.sched.alloc_sched_context)
    {
      save->be_saved_data = targetm.sched.alloc_sched_context ();
      targetm.sched.init_sched_context (save->be_saved_data, false);
    }
  else
    save->be_saved_data = NULL;

  save->delay_pair = pair;

  save->next = backtrack_queue;
  backtrack_queue = save;

  while (pair)
    {
      mark_backtrack_feeds (pair->i2, 1);
      INSN_TICK (pair->i2) = INVALID_TICK;
      INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
      SHADOW_P (pair->i2) = true;
      pair = pair->next_same_i1;
    }
}
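/* Illustrative note (not part of the original sources): a backtrack point is
   recorded when the first insn of a delay pair is scheduled.  Restoring it
   later (see restore_last_backtrack_point and free_topmost_backtrack_point
   below) rewinds the ready list, the insn queue, the DFA state and the clock
   to the values captured here, so scheduling can resume as if that insn had
   not yet been issued.  */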
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */
static void
unschedule_insns_until (rtx insn)
{
  for (;;)
    {
      rtx last;
      sd_iterator_def sd_it;
      dep_t dep;

      last = VEC_pop (rtx, scheduled_insns);

      /* This will be changed by restore_backtrack_point if the insn is in
	 any queue.  */
      QUEUE_INDEX (last) = QUEUE_NOWHERE;
      INSN_TICK (last) = INVALID_TICK;

      for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx con = DEP_CON (dep);
	  TODO_SPEC (con) |= HARD_DEP;
	  INSN_TICK (con) = INVALID_TICK;
	  sd_unresolve_dep (sd_it);
	}

      if (last == insn)
	break;
    }
}
/* Restore scheduler state from the topmost entry on the backtracking queue.
   PSCHED_BLOCK_P points to the local data of schedule_block that we must
   overwrite with the saved data.
   The caller must already have called unschedule_insns_until.  */
static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
{
  rtx link;
  int i;
  struct haifa_saved_data *save = backtrack_queue;

  backtrack_queue = save->next;

  if (current_sched_info->restore_state)
    (*current_sched_info->restore_state) (save->fe_saved_data);

  if (targetm.sched.alloc_sched_context)
    {
      targetm.sched.set_sched_context (save->be_saved_data);
      targetm.sched.free_sched_context (save->be_saved_data);
    }

  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
  if (ready.n_ready > 0)
    {
      rtx *first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
	{
	  QUEUE_INDEX (first[i]) = QUEUE_NOWHERE;
	  INSN_TICK (first[i]) = INVALID_TICK;
	}
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (link = insn_queue[q]; link; link = XEXP (link, 1))
	{
	  rtx x = XEXP (link, 0);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  INSN_TICK (x) = INVALID_TICK;
	}
      free_INSN_LIST_list (&insn_queue[q]);
    }

  free (ready.vec);
  ready = save->ready;

  if (ready.n_ready > 0)
    {
      rtx *first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
	{
	  QUEUE_INDEX (first[i]) = QUEUE_READY;
	  INSN_TICK (first[i]) = save->clock_var;
	}
    }

  q_ptr = 0;
  q_size = save->q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      insn_queue[q] = save->insn_queue[q];

      for (link = insn_queue[q]; link; link = XEXP (link, 1))
	{
	  rtx x = XEXP (link, 0);
	  QUEUE_INDEX (x) = i;
	  INSN_TICK (x) = save->clock_var + i;
	}
    }
  free (save->insn_queue);

  clock_var = save->clock_var;
  last_clock_var = save->last_clock_var;
  cycle_issued_insns = save->cycle_issued_insns;
  last_scheduled_insn = save->last_scheduled_insn;
  last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;

  *psched_block = save->sched_block;

  memcpy (curr_state, save->curr_state, dfa_state_size);
  free (save->curr_state);

  mark_backtrack_feeds (save->delay_pair->i2, 0);

  free (save);

  for (save = backtrack_queue; save; save = save->next)
    {
      mark_backtrack_feeds (save->delay_pair->i2, 1);
    }
}
/* Discard all data associated with the topmost entry in the backtrack
   queue.  If RESET_TICK is false, we just want to free the data.  If true,
   we are doing this because we discovered a reason to backtrack.  In the
   latter case, also reset the INSN_TICK for the shadow insn.  */
static void
free_topmost_backtrack_point (bool reset_tick)
{
  struct haifa_saved_data *save = backtrack_queue;
  int i;

  backtrack_queue = save->next;

  if (reset_tick)
    {
      struct delay_pair *pair = save->delay_pair;
      while (pair)
	{
	  INSN_TICK (pair->i2) = INVALID_TICK;
	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
	  pair = pair->next_same_i1;
	}
    }
  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (save->be_saved_data);
  if (current_sched_info->restore_state)
    free (save->fe_saved_data);
  for (i = 0; i <= max_insn_queue_index; i++)
    free_INSN_LIST_list (&save->insn_queue[i]);
  free (save->insn_queue);
  free (save->curr_state);
  free (save->ready.vec);
  free (save);
}

/* Free the entire backtrack queue.  */
static void
free_backtrack_queue (void)
{
  while (backtrack_queue)
    free_topmost_backtrack_point (false);
}
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered, a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look, it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
static bool
estimate_insn_tick (bitmap processed, rtx insn, int budget)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int earliest = INSN_TICK (insn);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      int t;

      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
      else
	{
	  int cost = dep_cost (dep);
	  if (cost >= budget)
	    return false;
	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
	    {
	      if (!estimate_insn_tick (processed, pro, budget - cost))
		return false;
	    }
	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
	  t = INSN_TICK_ESTIMATE (pro) + cost;
	  if (earliest == INVALID_TICK || t > earliest)
	    earliest = t;
	}
    }
  bitmap_set_bit (processed, INSN_LUID (insn));
  INSN_TICK_ESTIMATE (insn) = earliest;
  return true;
}
/* Examine the pair of insns in P, and estimate (optimistically, assuming
   infinite resources) the cycle in which the delayed shadow can be issued.
   Return the number of cycles that must pass before the real insn can be
   issued in order to meet this constraint.  */
static int
estimate_shadow_tick (struct delay_pair *p)
{
  bitmap_head processed;
  int t;
  bool cutoff;

  bitmap_initialize (&processed, 0);

  cutoff = !estimate_insn_tick (&processed, p->i2,
				max_insn_queue_index + pair_delay (p));
  bitmap_clear (&processed);
  if (cutoff)
    return max_insn_queue_index;
  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
  if (t > 0)
    return t;
  return 0;
}
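/* Illustrative note (not part of the original sources): with the formula
   above, if the shadow insn is optimistically estimated to become ready at
   tick 12 while CLOCK_VAR is 5 and the pair delay is 3, the real insn must
   stall for 12 - (5 + 3 + 1) = 3 more cycles; a non-positive result means no
   extra stall is required.  */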
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else if (DEBUG_INSN_P (beg_head))
      {
	rtx note, next;

	for (note = NEXT_INSN (beg_head);
	     note != beg_tail;
	     note = next)
	  {
	    next = NEXT_INSN (note);
	    if (NOTE_P (note))
	      {
		if (sched_verbose >= 9)
		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

		reorder_insns_nobb (note, note, PREV_INSN (beg_head));

		if (BLOCK_FOR_INSN (note) != beg)
		  df_insn_change_bb (note, beg);
	      }
	    else if (!DEBUG_INSN_P (note))
	      break;
	  }

	break;
      }
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else if (DEBUG_INSN_P (end_tail))
      {
	rtx note, prev;

	for (note = PREV_INSN (end_tail);
	     note != end_head;
	     note = prev)
	  {
	    prev = PREV_INSN (note);
	    if (NOTE_P (note))
	      {
		if (sched_verbose >= 9)
		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

		reorder_insns_nobb (note, note, end_tail);

		if (end_tail == BB_END (end))
		  BB_END (end) = note;

		if (BLOCK_FOR_INSN (note) != end)
		  df_insn_change_bb (note, end);
	      }
	    else if (!DEBUG_INSN_P (note))
	      break;
	  }

	break;
      }
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
int
no_real_insns_p (const_rtx head, const_rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx
restore_other_notes (rtx head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx note_head = note_list;

      if (head)
	head_bb = BLOCK_FOR_INSN (head);
      else
	head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
	{
	  set_block_for_insn (note_head, head_bb);
	  note_head = PREV_INSN (note_head);
	}
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
	BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    {
      /* If debug counter is activated do not requeue the first
	 nonscheduled insn.  */
      skip_insn = nonscheduled_insns_begin;
      do
	{
	  skip_insn = next_nonnote_nondebug_insn (skip_insn);
	}
      while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
    }
  else
    skip_insn = NULL_RTX;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn)
	  && insn != skip_insn)
	queue_insn (insn, 1, "ready full");
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n",
			     stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
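/* Illustrative note (not part of the original sources): queue_insn places an
   insn N cycles into the insn queue, e.g. queue_insn (insn, 3, "resource
   conflict"), and queue_to_ready is invoked once per cycle; after three
   cycles the insn's bucket comes up, it moves to the ready list, and the
   "Q-->Ready" line above is printed when sched_verbose >= 2.  */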
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  if (targetm.sched.is_costly_dependence)
    {
      rtx prev_insn;
      int n_cycles;
      int i = VEC_length (rtx, scheduled_insns);
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  while (i-- > 0)
	    {
	      int cost;

	      prev_insn = VEC_index (rtx, scheduled_insns, i);

	      if (!NOTE_P (prev_insn))
		{
		  dep_t dep;

		  dep = sd_find_dep_between (prev_insn, insn, true);

		  if (dep != NULL)
		    {
		      cost = dep_cost (dep);

		      if (targetm.sched.is_costly_dependence (dep, cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }
	}
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
	     prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
	 (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* non-negative to indicate that it's not ready
		   to avoid infinite Q->R->Q->R...  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* move from Q to R */
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */
    } /* for stalls.. */

  return insns_removed;
}
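/* Illustrative note (not part of the original sources): as described in the
   comment above, -fsched-stalled-insns=2 allows at most two queued insns per
   invocation to be promoted early, -fsched-stalled-insns=0 maps the flag
   variable to -1 and removes the limit, and leaving the option unset keeps
   the variable at 0 and disables early removal entirely.  */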
/* Print the ready list for debugging purposes.  Callable from debugger.  */
static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    {
      fprintf (sched_dump, "  %s:%d",
	       (*current_sched_info->print_insn) (p[i], 0),
	       INSN_LUID (p[i]));
      if (sched_pressure_p)
	fprintf (sched_dump, "(cost=%d",
		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
      if (INSN_TICK (p[i]) > clock_var)
	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
      if (sched_pressure_p)
	fprintf (sched_dump, ")");
    }
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
   replaces the epilogue note in the correct basic block.  */
static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn, rtx last, rtx nt)
{
  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
	/* If this is last instruction in BB, move end marker one
	   instruction up.  */
	{
	  /* Jumps are always placed at the end of basic block.  */
	  jump_p = control_flow_insn_p (insn);

	  gcc_assert (!jump_p
		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
		      || (common_sched_info->sched_pass_id
			  == SCHED_EBB_PASS));

	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

	  BB_END (bb) = PREV_INSN (insn);
	}

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
	/* We move the block note along with jump.  */
	{
	  gcc_assert (nt);

	  note = NEXT_INSN (insn);
	  while (NOTE_NOT_BB_P (note) && note != nt)
	    note = NEXT_INSN (note);

	  if (note != nt
	      && (LABEL_P (note)
		  || BARRIER_P (note)))
	    note = NEXT_INSN (note);

	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
	}
      else
	note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
	{
	  fix_jump_move (insn);

	  if (BLOCK_FOR_INSN (insn) != bb)
	    move_block_after_check (insn);

	  gcc_assert (BB_END (bb) == last);
	}

      df_insn_change_bb (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
	BB_END (bb) = insn;
    }

  SCHED_GROUP_P (insn) = 0;
}
/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}
/* Define type for target data used in multipass scheduling.  */
#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
#endif
typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
  /* Target-specific data.  */
  first_cycle_multipass_data_t target_data;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable value is maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;

/* The following value is value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is value of `issue_rate' at the last call of
   `schedule_block'.  */
static int cached_issue_rate = 0;
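/* Illustrative note (not part of the original sources): given the definition
   above, a target with dfa_lookahead == 4 and issue_rate == 2 gets
   max_lookahead_tries = 100 * 4 * 4 = 1600, which is the cap enforced on the
   number of tries inside max_issue below.  */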
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the insn
   with the best rank (the first insn in READY).  To do this the function
   tries different samples of ready insns.  READY is the current queue
   `ready'.  The global array READY_TRY reflects what insns are already
   issued in this try.  The function stops immediately if it has reached a
   solution in which all instructions can be issued.  INDEX will contain the
   index of the best insn in READY.  The following function is used only for
   first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
int
max_issue (struct ready_list *ready, int privileged_n, state_t state,
	   bool first_cycle_insn_p, int *index)
{
  int n, i, all, n_ready, best, delay, tries_num;
  int more_issue;
  struct choice_entry *top;
  rtx insn;

  n_ready = ready->n_ready;
  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
	      && privileged_n <= n_ready);

  /* Init MAX_LOOKAHEAD_TRIES.  */
  if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
    {
      cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
      max_lookahead_tries = 100;
      for (i = 0; i < issue_rate; i++)
	max_lookahead_tries *= dfa_lookahead;
    }

  /* Init max_points.  */
  more_issue = issue_rate - cycle_issued_insns;
  gcc_assert (more_issue >= 0);

  /* The number of the issued insns in the best solution.  */
  best = 0;

  top = choice_stack;

  /* Set initial state of the search.  */
  memcpy (top->state, state, dfa_state_size);
  top->rest = dfa_lookahead;
  top->n = 0;
  if (targetm.sched.first_cycle_multipass_begin)
    targetm.sched.first_cycle_multipass_begin (&top->target_data,
					       ready_try, n_ready,
					       first_cycle_insn_p);

  /* Count the number of the insns to search among.  */
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try[i])
      all++;

  /* I is the index of the insn to try next.  */
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (/* If we've reached a dead end or searched enough of what we have
	     been asked...  */
	  top->rest == 0
	  /* or have nothing else to try...  */
	  || i >= n_ready
	  /* or should not issue more.  */
	  || top->n >= more_issue)
	{
	  /* ??? (... || i == n_ready).  */
	  gcc_assert (i <= n_ready);

	  /* We should not issue more than issue_rate instructions.  */
	  gcc_assert (top->n <= more_issue);

	  if (top == choice_stack)
	    break;

	  if (best < top - choice_stack)
	    {
	      if (privileged_n)
		{
		  n = privileged_n;
		  /* Try to find issued privileged insn.  */
		  while (n && !ready_try[--n])
		    ;
		}

	      if (/* If all insns are equally good...  */
		  privileged_n == 0
		  /* Or a privileged insn will be issued.  */
		  || ready_try[n])
		/* Then we have a solution.  */
		{
		  best = top - choice_stack;
		  /* This is the index of the insn issued first in this
		     solution.  */
		  *index = choice_stack[1].index;
		  if (top->n == more_issue || best == all)
		    break;
		}
	    }

	  /* Set ready-list index to point to the last insn
	     ('i++' below will advance it to the next insn).  */
	  i = top->index;

	  /* Backtrack.  */
	  ready_try[i] = 0;

	  if (targetm.sched.first_cycle_multipass_backtrack)
	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
							   ready_try, n_ready);

	  top--;
	  memcpy (state, top->state, dfa_state_size);
	}
      else if (!ready_try[i])
	{
	  tries_num++;
	  if (tries_num > max_lookahead_tries)
	    break;
	  insn = ready_element (ready, i);
	  delay = state_transition (state, insn);
	  if (delay < 0)
	    {
	      if (state_dead_lock_p (state)
		  || insn_finishes_cycle_p (insn))
		/* We won't issue any more instructions in the next
		   choice_state.  */
		top->rest = 0;
	      else
		top->rest--;

	      n = top->n;
	      if (memcmp (top->state, state, dfa_state_size) != 0)
		n++;

	      /* Advance to the next choice_entry.  */
	      top++;
	      /* Initialize it.  */
	      top->rest = dfa_lookahead;
	      top->index = i;
	      top->n = n;
	      memcpy (top->state, state, dfa_state_size);

	      ready_try[i] = 1;

	      if (targetm.sched.first_cycle_multipass_issue)
		targetm.sched.first_cycle_multipass_issue (&top->target_data,
							   ready_try, n_ready,
							   insn,
							   &((top - 1)
							     ->target_data));

	      i = -1;
	    }
	}

      /* Increase ready-list index.  */
      i++;
    }

  if (targetm.sched.first_cycle_multipass_end)
    targetm.sched.first_cycle_multipass_end (best != 0
					     ? &choice_stack[1].target_data
					     : NULL);

  /* Restore the original state of the DFA.  */
  memcpy (state, choice_stack->state, dfa_state_size);

  return best;
}
/* The following function chooses insn from READY and modifies
   READY.  The following function is used only for first
   cycle multipass scheduling.
   Return:
   -1 if cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
static int
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
	      rtx *insn_ptr)
{
  int lookahead;

  if (dbg_cnt (sched_insn) == false)
    {
      rtx insn = nonscheduled_insns_begin;
      do
	{
	  insn = next_nonnote_insn (insn);
	}
      while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);

      if (QUEUE_INDEX (insn) == QUEUE_READY)
	/* INSN is in the ready_list.  */
	{
	  nonscheduled_insns_begin = insn;
	  ready_remove_insn (insn);
	  *insn_ptr = insn;
	  return 0;
	}

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      return -1;
    }

  lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
      || DEBUG_INSN_P (ready_element (ready, 0)))
    {
      if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
	*insn_ptr = ready_remove_first_dispatch (ready);
      else
	*insn_ptr = ready_remove_first (ready);

      return 0;
    }
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int try_data = 1, try_control = 1;
      ds_t ts;

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
	{
	  *insn_ptr = ready_remove_first (ready);
	  return 0;
	}

      if (spec_info
	  && spec_info->flags & (PREFER_NON_DATA_SPEC
				 | PREFER_NON_CONTROL_SPEC))
	{
	  for (i = 0, n = ready->n_ready; i < n; i++)
	    {
	      rtx x;
	      ds_t s;

	      x = ready_element (ready, i);
	      s = TODO_SPEC (x);

	      if (spec_info->flags & PREFER_NON_DATA_SPEC
		  && !(s & DATA_SPEC))
		{
		  try_data = 0;
		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
		      || !try_control)
		    break;
		}

	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
		  && !(s & CONTROL_SPEC))
		{
		  try_control = 0;
		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
		    break;
		}
	    }
	}

      ts = TODO_SPEC (insn);
      if ((ts & SPECULATIVE)
	  && (((!try_data && (ts & DATA_SPEC))
	       || (!try_control && (ts & CONTROL_SPEC)))
	      || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
		  && !targetm.sched
		      .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
	/* Discard speculative instruction that stands first in the ready
	   list.  */
	{
	  change_queue_index (insn, 1);
	  return 1;
	}

      ready_try[0] = 0;

      for (i = 1; i < ready->n_ready; i++)
	{
	  insn = ready_element (ready, i);

	  ready_try[i]
	    = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
	       || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
	}

      /* Let the target filter the search space.  */
      for (i = 1; i < ready->n_ready; i++)
	if (!ready_try[i])
	  {
	    insn = ready_element (ready, i);

	    /* If this insn is recognizable we should have already
	       recognized it earlier.
	       ??? Not very clear where this is supposed to be done.  */
	    gcc_checking_assert (INSN_CODE (insn) >= 0
				 || recog_memoized (insn) < 0);

	    ready_try[i]
	      = (/* INSN_CODE check can be omitted here as it is also done later
		    in max_issue ().  */
		 INSN_CODE (insn) < 0
		 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		     && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
			 (insn)));
	  }

      if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
	{
	  *insn_ptr = ready_remove_first (ready);
	  if (sched_verbose >= 4)
	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
		     (*current_sched_info->print_insn) (*insn_ptr, 0));
	  return 0;
	}
      else
	{
	  if (sched_verbose >= 4)
	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
		     (*current_sched_info->print_insn)
		     (ready_element (ready, index), 0));

	  *insn_ptr = ready_remove (ready, index);
	  return 0;
	}
    }
}
/* This function is called when we have successfully scheduled a
   block.  It uses the schedule stored in the scheduled_insns vector
   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
   append the scheduled insns; TAIL is the insn after the scheduled
   block.  TARGET_BB is the argument passed to schedule_block.  */

static void
commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
{
  unsigned int i;
  rtx insn;

  last_scheduled_insn = prev_head;
  for (i = 0;
       VEC_iterate (rtx, scheduled_insns, i, insn);
       i++)
    {
      if (control_flow_insn_p (last_scheduled_insn)
	  || current_sched_info->advance_target_bb (*target_bb, insn))
	{
	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);

	  if (sched_verbose)
	    {
	      rtx x;

	      x = next_real_insn (last_scheduled_insn);
	      gcc_assert (x);
	      dump_new_block_header (1, *target_bb, x, tail);
	    }

	  last_scheduled_insn = bb_note (*target_bb);
	}

      if (current_sched_info->begin_move_insn)
	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
      move_insn (insn, last_scheduled_insn,
		 current_sched_info->next_tail);
      if (!DEBUG_INSN_P (insn))
	reemit_notes (insn);
      last_scheduled_insn = insn;
    }

  VEC_truncate (rtx, scheduled_insns, 0);
}
/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.

   Return the number of cycles we must
   advance to find the next ready instruction, or zero if there remain
   insns on the ready list.  */
static void
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
		  bool shadows_only_p)
{
  int i;

 restart:
  for (i = 0; i < ready.n_ready; i++)
    {
      rtx insn = ready_element (&ready, i);
      int cost = 0;
      const char *reason = "resource conflict";

      if (shadows_only_p && !DEBUG_INSN_P (insn) && !SHADOW_P (insn))
	{
	  cost = 1;
	  reason = "not a shadow";
	}
      else if (recog_memoized (insn) < 0)
	{
	  if (!first_cycle_insn_p
	      && (GET_CODE (PATTERN (insn)) == ASM_INPUT
		  || asm_noperands (PATTERN (insn)) >= 0))
	    cost = 1;
	}
      else if (sched_pressure_p)
	cost = 0;
      else
	{
	  int delay_cost = 0;

	  if (delay_htab)
	    {
	      struct delay_pair *delay_entry;
	      delay_entry
		= (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
							    htab_hash_pointer (insn));
	      while (delay_entry && delay_cost == 0)
		{
		  delay_cost = estimate_shadow_tick (delay_entry);
		  if (delay_cost > max_insn_queue_index)
		    delay_cost = max_insn_queue_index;
		  delay_entry = delay_entry->next_same_i1;
		}
	    }

	  memcpy (temp_state, curr_state, dfa_state_size);
	  cost = state_transition (temp_state, insn);
	  if (cost < 0)
	    cost = 0;
	  else if (cost == 0)
	    cost = 1;
	  if (cost < delay_cost)
	    {
	      cost = delay_cost;
	      reason = "shadow tick";
	    }
	}
      if (cost >= 1)
	{
	  ready_remove (&ready, i);
	  queue_insn (insn, cost, reason);
	  goto restart;
	}
    }
}
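/* Illustrative note (not part of the original sources): the three reasons an
   insn can be requeued above are "not a shadow" (when only shadows may
   issue), "shadow tick" (a delay pair cannot yet meet its exact tick, as
   estimated by estimate_shadow_tick) and the default "resource conflict"
   from the DFA transition; the chosen cost becomes the number of cycles
   passed to queue_insn.  */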
/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
{
  struct haifa_saved_data *save, *earliest_fail = NULL;
  for (save = backtrack_queue; save; save = save->next)
    {
      int t;
      struct delay_pair *pair = save->delay_pair;
      rtx i1 = pair->i1;

      for (; pair; pair = pair->next_same_i1)
	{
	  rtx i2 = pair->i2;

	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
	    continue;

	  t = INSN_TICK (i1) + pair_delay (pair);
	  if (t < clock_var)
	    {
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
			 "\n",
			 INSN_UID (pair->i1), INSN_UID (pair->i2),
			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
	      earliest_fail = save;
	      break;
	    }
	  if (QUEUE_INDEX (i2) >= 0)
	    {
	      int queued_for = INSN_TICK (i2);

	      if (queued_for > t)
		{
		  if (sched_verbose >= 2)
		    fprintf (sched_dump,
			     ";;\t\tfailed delay requirements for %d/%d"
			     " (%d->%d), queued too late\n",
			     INSN_UID (pair->i1), INSN_UID (pair->i2),
			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
		  earliest_fail = save;
		  break;
		}
	    }
	}
    }

  return earliest_fail;
}
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

void
schedule_block (basic_block *target_bb)
{
  int i;
  bool must_backtrack;
  struct sched_block_state ls;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  backtrack_queue = NULL;

  /* Debug info.  */
  if (sched_verbose)
    dump_new_block_header (0, *target_bb, head, tail);

  state_reset (curr_state);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;
  ready.n_ready = 0;
  ready.n_debug = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = nonscheduled_insns_begin = prev_head;
  last_nondebug_scheduled_insn = NULL_RTX;

  gcc_assert ((NOTE_P (last_scheduled_insn)
	       || DEBUG_INSN_P (last_scheduled_insn))
	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
    {
      ready_sort (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
	 If there are debug insns, we know they're first.  */
      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
	if (!SCHED_GROUP_P (ready_element (&ready, i)))
	  break;

      if (sched_verbose >= 2)
	{
	  fprintf (sched_dump,
		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
	  fprintf (sched_dump,
		   ";;\t\t before reload => truncated to %d insns\n", i);
	}

      /* Delay all insns past it for 1 cycle.  If debug counter is
	 activated make an exception for the insn right after
	 nonscheduled_insns_begin.  */
      {
	rtx skip_insn;

	if (dbg_cnt (sched_insn) == false)
	  skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
	else
	  skip_insn = NULL_RTX;

	while (i < ready.n_ready)
	  {
	    rtx insn;

	    insn = ready_remove (&ready, i);

	    if (insn != skip_insn)
	      queue_insn (insn, 1, "list truncated");
	  }
	if (skip_insn)
	  ready_add (&ready, skip_insn, true);
      }
    }

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
  sort_p = TRUE;
  must_backtrack = false;

  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      do
	{
	  start_clock_var = clock_var;

	  clock_var++;

	  advance_one_cycle ();

	  /* Add to the ready list all pending insns that can be issued now.
	     If there are no ready insns, increment clock until one
	     is ready and add all pending insns at that point to the ready
	     list.  */
	  queue_to_ready (&ready);

	  gcc_assert (ready.n_ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
	      debug_ready_list (&ready);
	    }
	  advance -= clock_var - start_clock_var;
	}
      while (advance > 0);

      if (ready.n_ready > 0)
	prune_ready_list (temp_state, true, false);
      if (ready.n_ready == 0)
	continue;

      ls.first_cycle_insn_p = true;
      ls.shadows_only_p = false;
      cycle_issued_insns = 0;
      ls.can_issue_more = issue_rate;
      for (;;)
	{
	  rtx insn;
	  bool asm_p;

	  if (sort_p && ready.n_ready > 0)
	    {
	      /* Sort the ready list based on priority.  This must be
		 done every iteration through the loop, as schedule_insn
		 may have readied additional insns that will not be
		 sorted correctly.  */
	      ready_sort (&ready);

	      if (sched_verbose >= 2)
		{
		  fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
		  debug_ready_list (&ready);
		}
	    }

	  /* We don't want md sched reorder to even see debug insns, so put
	     them out right away.  */
	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
	      && (*current_sched_info->schedule_more_p) ())
	    {
	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
		{
		  rtx insn = ready_remove_first (&ready);
		  gcc_assert (DEBUG_INSN_P (insn));
		  (*current_sched_info->begin_schedule_ready) (insn);
		  VEC_safe_push (rtx, heap, scheduled_insns, insn);
		  last_scheduled_insn = insn;
		  advance = schedule_insn (insn);
		  gcc_assert (advance == 0);
		  if (ready.n_ready > 0)
		    ready_sort (&ready);
		}
	    }

	  if (ls.first_cycle_insn_p && !ready.n_ready)
	    break;

	resume_after_backtrack:
	  /* Allow the target to reorder the list, typically for
	     better instruction bundling.  */
	  if (sort_p
	      && (ready.n_ready == 0
		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
	    {
	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
		ls.can_issue_more
		  = targetm.sched.reorder (sched_dump, sched_verbose,
					   ready_lastpos (&ready),
					   &ready.n_ready, clock_var);
	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
		ls.can_issue_more
		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
					    ready.n_ready
					    ? ready_lastpos (&ready) : NULL,
					    &ready.n_ready, clock_var);
	    }

	restart_choose_ready:
	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
		       clock_var);
	      debug_ready_list (&ready);
	      if (sched_pressure_p)
		print_curr_reg_pressure ();
	    }

	  if (ready.n_ready == 0
	      && ls.can_issue_more
	      && reload_completed)
	    {
	      /* Allow scheduling insns directly from the queue in case
		 there's nothing better to do (ready list is empty) but
		 there are still vacant dispatch slots in the current cycle.  */
	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
	      memcpy (temp_state, curr_state, dfa_state_size);
	      if (early_queue_to_ready (temp_state, &ready))
		ready_sort (&ready);
	    }

	  if (ready.n_ready == 0
	      || !ls.can_issue_more
	      || state_dead_lock_p (curr_state)
	      || !(*current_sched_info->schedule_more_p) ())
	    break;

	  /* Select and remove the insn from the ready list.  */
	  if (sort_p)
	    {
	      int res;

	      insn = NULL_RTX;
	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);

	      if (res < 0)
		/* Finish cycle.  */
		break;
	      if (res > 0)
		goto restart_choose_ready;

	      gcc_assert (insn != NULL_RTX);
	    }
	  else
	    insn = ready_remove_first (&ready);

	  if (sched_pressure_p && INSN_TICK (insn) > clock_var)
	    {
	      ready_add (&ready, insn, true);
	      advance = 1;
	      break;
	    }

	  if (targetm.sched.dfa_new_cycle
	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
					      insn, last_clock_var,
					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so it will be returned by
	       the ready_remove_first call above), we invoke
	       ready_add (&ready, insn, true).
	       But, still, there is one issue: INSN can be later
	       discarded by scheduler's front end through
	       current_sched_info->can_schedule_ready_p, hence, won't
	       be issued next.  */
	    {
	      ready_add (&ready, insn, true);
	      break;
	    }

	  sort_p = TRUE;

	  if (current_sched_info->can_schedule_ready_p
	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
	    /* We normally get here only if we don't want to move
	       insn from the split block.  */
	    {
	      TODO_SPEC (insn) = HARD_DEP;
	      goto restart_choose_ready;
	    }

	  if (delay_htab)
	    {
	      /* If this insn is the first part of a delay-slot pair, record a
		 backtrack point.  */
	      struct delay_pair *delay_entry;
	      delay_entry
		= (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
							    htab_hash_pointer (insn));
	      if (delay_entry)
		{
		  save_backtrack_point (delay_entry, ls);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
		}
	    }

	  /* DECISION is made.  */

	  if (TODO_SPEC (insn) & SPECULATIVE)
	    generate_recovery_code (insn);

	  if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

	  /* Update counters, etc in the scheduler's front end.  */
	  (*current_sched_info->begin_schedule_ready) (insn);
	  VEC_safe_push (rtx, heap, scheduled_insns, insn);
	  gcc_assert (NONDEBUG_INSN_P (insn));
	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;

	  if (recog_memoized (insn) >= 0)
	    {
	      int cost;

	      memcpy (temp_state, curr_state, dfa_state_size);
	      cost = state_transition (curr_state, insn);
	      if (!sched_pressure_p)
		gcc_assert (cost < 0);
	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
		cycle_issued_insns++;
	      asm_p = false;
	    }
	  else
	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
		     || asm_noperands (PATTERN (insn)) >= 0);

	  if (targetm.sched.variable_issue)
	    ls.can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					    insn, ls.can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so do
	     not count them against the issue rate.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    ls.can_issue_more--;
	  advance = schedule_insn (insn);

	  if (SHADOW_P (insn))
	    ls.shadows_only_p = true;

	  /* After issuing an asm insn we should start a new cycle.  */
	  if (advance == 0 && asm_p)
	    advance = 1;

	  if (must_backtrack)
	    break;

	  if (advance != 0)
	    break;

	  ls.first_cycle_insn_p = false;
	  if (ready.n_ready > 0)
	    prune_ready_list (temp_state, false, ls.shadows_only_p);
	}

      if (!must_backtrack)
	for (i = 0; i < ready.n_ready; i++)
	  {
	    rtx insn = ready_element (&ready, i);
	    if (INSN_EXACT_TICK (insn) == clock_var)
	      {
		must_backtrack = true;
		break;
	      }
	  }
      while (must_backtrack)
	{
	  struct haifa_saved_data *failed;
	  rtx failed_insn;

	  must_backtrack = false;
	  failed = verify_shadows ();
	  gcc_assert (failed);

	  failed_insn = failed->delay_pair->i1;
	  unschedule_insns_until (failed_insn);
	  while (failed != backtrack_queue)
	    free_topmost_backtrack_point (true);
	  restore_last_backtrack_point (&ls);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
	  /* Delay by at least a cycle.  This could cause additional
	     backtracking.  */
	  queue_insn (failed_insn, 1, "backtracked");
	  advance = 0;
	  if (ready.n_ready > 0)
	    goto resume_after_backtrack;
	  else
	    {
	      if (clock_var == 0 && ls.first_cycle_insn_p)
		goto end_schedule;
	      advance = 1;
	      break;
	    }
	}
    }
 end_schedule:
  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);
    }

  if (current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  rtx x;

	  x = ready_element (&ready, i);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  TODO_SPEC (x) = HARD_DEP;
	}

      for (i = 0; i <= max_insn_queue_index; i++)
	{
	  rtx link;
	  for (link = insn_queue[i]; link; link = XEXP (link, 1))
	    {
	      rtx x;

	      x = XEXP (link, 0);
	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
	      TODO_SPEC (x) = HARD_DEP;
	    }
	  free_INSN_LIST_list (&insn_queue[i]);
	}
    }

  commit_schedule (prev_head, tail, target_bb);
  if (sched_verbose)
    fprintf (sched_dump, ";;   total time = %d\n", clock_var);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
	 ready) may be not correct for the insn in the subsequent
	 blocks of the region.  We should use a correct value of
	 `clock_var' or modify INSN_TICK.  It is better to keep
	 clock_var value equal to 0 at the start of a basic block.
	 Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }

  if (targetm.sched.finish)
    {
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
	 in its md_finish () hook.  These new insns don't have any data
	 initialized and to identify them we extend h_i_d so that they'll
	 get zero luids.  */
      sched_extend_luids ();
    }

  if (sched_verbose)
    fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n\n",
	     INSN_UID (head), INSN_UID (tail));

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free_backtrack_queue ();
}
/* Set_priorities: compute priority of each insn in the block.  */

int
set_priorities (rtx head, rtx tail)
{
  rtx insn;
  int n_insn;
  int sched_max_insns_priority =
	current_sched_info->sched_max_insns_priority;
  rtx prev_head;

  if (head == tail && ! INSN_P (head))
    return 0;

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
	continue;

      n_insn++;
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
				      INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}
/* Set dump and sched_verbose for the desired debugging output.  If no
   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N>=10, print everything to stderr.  */
void
setup_sched_dump (void)
{
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
		? stderr : dump_file);
}
/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */

void
sched_init (void)
{
#ifdef HAVE_cc0
  /* Disable speculative loads in their presence if cc0 defined.  */
  flag_schedule_speculative_load = 0;
#endif

  if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
    targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);

  sched_pressure_p = (flag_sched_pressure && ! reload_completed
		      && common_sched_info->sched_pass_id == SCHED_RGN_PASS);

  if (sched_pressure_p)
    ira_setup_eliminable_regset ();

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);

      if (spec_info->mask != 0)
	{
	  spec_info->data_weakness_cutoff =
	    (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
	  spec_info->control_weakness_cutoff =
	    (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
	     * REG_BR_PROB_BASE) / 100;
	}
      else
	/* So we won't read anything accidentally.  */
	spec_info = NULL;
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = NULL;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (cached_issue_rate != issue_rate)
    {
      cached_issue_rate = issue_rate;
      /* To invalidate max_lookahead_tries:  */
      cached_first_cycle_multipass_dfa_lookahead = 0;
    }

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  else
    dfa_lookahead = 0;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_state_size = state_size ();

  init_alias_analysis ();

  df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* More problems needed for interloop dep calculation in SMS.  */
  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
    {
      df_rd_add_problem ();
      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
    }

  /* Do not run DCE after reload, as this can kill nops inserted
     by bundling.  */
  if (reload_completed)
    df_clear_flags (DF_LR_RUN_DCE);

  regstat_compute_calls_crossed ();

  if (targetm.sched.init_global)
    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);

  if (sched_pressure_p)
    {
      int i, max_regno = max_reg_num ();

      ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
      sched_regno_pressure_class
	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
      for (i = 0; i < max_regno; i++)
	sched_regno_pressure_class[i]
	  = (i < FIRST_PSEUDO_REGISTER
	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
	     : ira_pressure_class_translate[reg_allocno_class (i)]);
      curr_reg_live = BITMAP_ALLOC (NULL);
      saved_reg_live = BITMAP_ALLOC (NULL);
      region_ref_regs = BITMAP_ALLOC (NULL);
    }

  curr_state = xmalloc (dfa_state_size);
}
static void haifa_init_only_bb (basic_block, basic_block);

/* Initialize data structures specific to the Haifa scheduler.  */
void
haifa_sched_init (void)
{
  setup_sched_dump ();
  sched_init ();

  scheduled_insns = VEC_alloc (rtx, heap, 0);

  if (spec_info != NULL)
    {
      sched_deps_info->use_deps_list = 1;
      sched_deps_info->generate_spec_deps = 1;
    }

  /* Initialize luids, dependency caches, target and h_i_d for the
     whole function.  */
  {
    bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
    basic_block bb;

    FOR_EACH_BB (bb)
      VEC_quick_push (basic_block, bbs, bb);
    sched_init_luids (bbs);
    sched_deps_init (true);
    sched_extend_target ();
    haifa_init_h_i_d (bbs);

    VEC_free (basic_block, heap, bbs);
  }

  sched_init_only_bb = haifa_init_only_bb;
  sched_split_block = sched_split_block_1;
  sched_create_empty_bb = sched_create_empty_bb_1;
  haifa_recovery_bb_ever_added_p = false;

#ifdef ENABLE_CHECKING
  /* This is used preferably for finding bugs in check_cfg () itself.
     We must call sched_bbs_init () before check_cfg () because check_cfg ()
     assumes that the last insn in the last bb has a non-null successor.  */
  check_cfg (0, 0);
#endif

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
}
/* Finish work with the data specific to the Haifa scheduler.  */
void
haifa_sched_finish (void)
{
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
	       ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-data-spec motions == %d\n",
	       c, nr_begin_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-data-spec motions == %d\n",
	       c, nr_be_in_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-control-spec motions == %d\n",
	       c, nr_begin_control);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-control-spec motions == %d\n",
	       c, nr_be_in_control);
    }

  VEC_free (rtx, heap, scheduled_insns);

  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  */
  sched_deps_finish ();
  sched_finish_luids ();
  current_sched_info = NULL;
  sched_finish ();
}
/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */
void
sched_finish (void)
{
  haifa_finish_h_i_d ();
  if (sched_pressure_p)
    {
      free (sched_regno_pressure_class);
      BITMAP_FREE (region_ref_regs);
      BITMAP_FREE (saved_reg_live);
      BITMAP_FREE (curr_reg_live);
    }
  free (curr_state);

  if (targetm.sched.finish_global)
    targetm.sched.finish_global (sched_dump, sched_verbose);

  end_alias_analysis ();

  regstat_free_calls_crossed ();

#ifdef ENABLE_CHECKING
  /* After reload ia64 backend clobbers CFG, so can't check anything.  */
  if (!reload_completed)
    check_cfg (0, 0);
#endif
}
/* Free all delay_pair structures that were recorded.  */
void
free_delay_pairs (void)
{
  if (delay_htab)
    {
      htab_empty (delay_htab);
      htab_empty (delay_htab_i2);
    }
}
/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx head, rtx tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  bitmap_head processed;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     on the start of the block are preferable to those which have
     a one cycle dependency over insn from the previous block.  */
  int next_clock = clock_var + 1;

  bitmap_initialize (&processed, 0);

  /* Iterates over scheduled instructions and fix their INSN_TICKs and
     INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
     across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
        {
          int tick;
          sd_iterator_def sd_it;
          dep_t dep;

          tick = INSN_TICK (head);
          gcc_assert (tick >= MIN_TICK);

          /* Fix INSN_TICK of instruction from just scheduled block.  */
          if (bitmap_set_bit (&processed, INSN_LUID (head)))
            {
              tick -= next_clock;

              if (tick < MIN_TICK)
                tick = MIN_TICK;

              INSN_TICK (head) = tick;
            }

          FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
            {
              rtx next;

              next = DEP_CON (dep);
              tick = INSN_TICK (next);

              if (tick != INVALID_TICK
                  /* If NEXT has its INSN_TICK calculated, fix it.
                     If not - it will be properly calculated from
                     scratch later in fix_tick_ready.  */
                  && bitmap_set_bit (&processed, INSN_LUID (next)))
                {
                  tick -= next_clock;

                  if (tick < MIN_TICK)
                    tick = MIN_TICK;

                  if (tick > INTER_TICK (next))
                    INTER_TICK (next) = tick;
                  else
                    tick = INTER_TICK (next);

                  INSN_TICK (next) = tick;
                }
            }
        }
    }
  bitmap_clear (&processed);
}
static int haifa_speculate_insn (rtx, ds_t, rtx *);

/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx next)
{
  ds_t old_ts, new_ts;

  old_ts = TODO_SPEC (next);

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
              && ((old_ts & HARD_DEP)
                  || (old_ts & SPECULATIVE)));

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    new_ts = 0;
  else
    {
      /* One of the NEXT's dependencies has been resolved.
         Recalculate NEXT's status.  */

      if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
        new_ts = HARD_DEP;
      else
        /* Now we've got NEXT with speculative deps only.
           1. Look at the deps to see what we have to do.
           2. Check if we can do 'todo'.  */
        {
          sd_iterator_def sd_it;
          dep_t dep;
          bool first_p = true;

          new_ts = 0;

          FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
            {
              ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

              if (DEBUG_INSN_P (DEP_PRO (dep))
                  && !DEBUG_INSN_P (next))
                continue;

              if (first_p)
                {
                  first_p = false;

                  new_ts = ds;
                }
              else
                new_ts = ds_merge (new_ts, ds);
            }

          if (ds_weak (new_ts) < spec_info->data_weakness_cutoff)
            /* Too few points.  */
            new_ts = HARD_DEP;
        }
    }

  if (new_ts & HARD_DEP)
    gcc_assert (new_ts == HARD_DEP && new_ts == old_ts
                && QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    new_ts = current_sched_info->new_ready (next, new_ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or changed (speculative) one.  This is due
     to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has speculative pattern.

     We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */
  if ((new_ts & SPECULATIVE)
      /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
         need to change anything.  */
      && new_ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert (!(new_ts & ~SPECULATIVE));

      res = haifa_speculate_insn (next, new_ts, &new_pat);

      switch (res)
        {
        case -1:
          /* It would be nice to change DEP_STATUS of all dependences,
             which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
             so we won't reanalyze anything.  */
          new_ts = HARD_DEP;
          break;

        case 0:
          /* We follow the rule, that every speculative insn
             has non-null ORIG_PAT.  */
          if (!ORIG_PAT (next))
            ORIG_PAT (next) = PATTERN (next);
          break;

        case 1:
          if (!ORIG_PAT (next))
            /* If we are going to overwrite the original pattern of insn,
               save it.  */
            ORIG_PAT (next) = PATTERN (next);

          haifa_change_pattern (next, new_pat);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      /* We need to restore pattern only if (new_ts == 0), because otherwise it is
         either correct (new_ts & SPECULATIVE),
         or we simply don't care (new_ts & HARD_DEP).  */

      gcc_assert (!ORIG_PAT (next)
                  || !IS_SPECULATION_BRANCHY_CHECK_P (next));
    }

  TODO_SPEC (next) = new_ts;

  if (new_ts & HARD_DEP)
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
         control-speculative NEXT could have been discarded by sched-rgn.c
         (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);
      return -1;
    }
  else if (!(new_ts & BEGIN_SPEC)
           && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
    /* We should change pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       ORIG_PAT field.  Except one case - speculation checks have ORIG_PAT
       pat too, so skip them.  */
    {
      haifa_change_pattern (next, ORIG_PAT (next));
      ORIG_PAT (next) = 0;
    }

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
        {
          if (new_ts & BEGIN_DATA)
            fprintf (spec_info->dump, "; data-spec;");
          if (new_ts & BEGIN_CONTROL)
            fprintf (spec_info->dump, "; control-spec;");
          if (new_ts & BE_IN_CONTROL)
            fprintf (spec_info->dump, "; in-control-spec;");
        }

      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx next)
{
  int tick, delay;

  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
    {
      int full_p;
      sd_iterator_def sd_it;
      dep_t dep;

      tick = INSN_TICK (next);
      /* if tick is not equal to INVALID_TICK, then update
         INSN_TICK of NEXT with the most recent resolved dependence
         cost.  Otherwise, recalculate from scratch.  */
      full_p = (tick == INVALID_TICK);

      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
        {
          rtx pro = DEP_PRO (dep);
          int tick1;

          gcc_assert (INSN_TICK (pro) >= MIN_TICK);

          tick1 = INSN_TICK (pro) + dep_cost (dep);
          if (tick1 > tick)
            {
              tick = tick1;
              if (!full_p)
                break;
            }
        }
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0 || sched_pressure_p)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
              && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay, "change queue index");

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
        fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
        fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
        fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}
static int sched_ready_n_insns = -1;

/* Initialize per region data structures.  */
void
sched_extend_ready_list (int new_sched_ready_n_insns)
{
  int i;

  if (sched_ready_n_insns == -1)
    /* At the first call we need to initialize one more choice_stack
       entry.  */
    {
      i = 0;
      sched_ready_n_insns = 0;
      VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
    }
  else
    i = sched_ready_n_insns + 1;

  ready.veclen = new_sched_ready_n_insns + issue_rate;
  ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);

  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);

  ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
                                  sched_ready_n_insns, sizeof (*ready_try));

  /* We allocate +1 element to save initial state in the choice_stack[0]
     entry.  */
  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
                             new_sched_ready_n_insns + 1);

  for (; i <= new_sched_ready_n_insns; i++)
    {
      choice_stack[i].state = xmalloc (dfa_state_size);

      if (targetm.sched.first_cycle_multipass_init)
        targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
                                                    .target_data));
    }

  sched_ready_n_insns = new_sched_ready_n_insns;
}
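/* Added note (not in the original sources): READY_TRY and CHOICE_STACK are
   sized from the same NEW_SCHED_READY_N_INSNS bound because they are the
   scratch state of the first-cycle multipass lookahead; CHOICE_STACK gets
   one extra entry so slot 0 can hold the DFA state at the start of the
   cycle, as the comment above notes.  */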
/* Free per region data structures.  */
void
sched_finish_ready_list (void)
{
  int i;

  free (ready.vec);
  ready.vec = NULL;
  ready.veclen = 0;

  free (ready_try);
  ready_try = NULL;

  for (i = 0; i <= sched_ready_n_insns; i++)
    {
      if (targetm.sched.first_cycle_multipass_fini)
        targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
                                                    .target_data));

      free (choice_stack[i].state);
    }
  free (choice_stack);
  choice_stack = NULL;

  sched_ready_n_insns = -1;
}

static int
haifa_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return 0;
}
/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}
/* Helper function.
   Tries to add speculative dependencies of type FS between INSN's
   consumers and TWIN.  */
static void
process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      ds_t ds;
      rtx consumer;

      consumer = DEP_CON (dep);

      ds = DEP_STATUS (dep);

      if (/* If we want to create speculative dep.  */
          fs
          /* And we can do that because this is a true dep.  */
          && (ds & DEP_TYPES) == DEP_TRUE)
        {
          gcc_assert (!(ds & BE_IN_SPEC));

          if (/* If this dep can be overcome with 'begin speculation'.  */
              ds & BEGIN_SPEC)
            /* Then we have a choice: keep the dep 'begin speculative'
               or transform it into 'be in speculative'.  */
            {
              if (/* In try_ready we assert that if insn once became ready
                     it can be removed from the ready (or queue) list only
                     due to backend decision.  Hence we can't let the
                     probability of the speculative dep decrease.  */
                  ds_weak (ds) <= ds_weak (fs))
                {
                  ds_t new_ds;

                  new_ds = (ds & ~BEGIN_SPEC) | fs;

                  if (/* consumer can 'be in speculative'.  */
                      sched_insn_is_legitimate_for_speculation_p (consumer,
                                                                  new_ds))
                    /* Transform it to be in speculative.  */
                    ds = new_ds;
                }
            }
          else
            /* Mark the dep as 'be in speculative'.  */
            ds |= fs;
        }

      {
        dep_def _new_dep, *new_dep = &_new_dep;

        init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
        sd_add_dep (new_dep, false);
      }
    }
}
/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_DATA)
    nr_begin_data++;
  if (TODO_SPEC (insn) & BEGIN_CONTROL)
    nr_begin_control++;

  create_check_block_twin (insn, false);

  TODO_SPEC (insn) &= ~BEGIN_SPEC;
}

static void haifa_init_insn (rtx);
/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx insn)
{
  ds_t ts;
  sd_iterator_def sd_it;
  dep_t dep;
  rtx twins = NULL;
  rtx_vec_t priorities_roots;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
    nr_be_in_data++;
  if (ts & BE_IN_CONTROL)
    nr_be_in_control++;

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx check = DEP_PRO (dep);

      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
        {
          create_check_block_twin (check, true);

          /* Restart search.  */
          sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
        }
      else
        /* Continue search.  */
        sd_iterator_next (&sd_it);
    }

  priorities_roots = NULL;
  clear_priorities (insn, &priorities_roots);

  while (1)
    {
      rtx check, twin;
      basic_block rec;

      /* Get the first backward dependency of INSN.  */
      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
      if (!sd_iterator_cond (&sd_it, &dep))
        /* INSN has no backward dependencies left.  */
        break;

      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
                  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
                  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

      check = DEP_PRO (dep);

      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
                  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
      haifa_init_insn (twin);

      sd_copy_back_deps (twin, insn, true);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);

      twins = alloc_INSN_LIST (twin, twins);

      /* Add dependences between TWIN and all appropriate
         instructions from REC.  */
      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
        {
          rtx pro = DEP_PRO (dep);

          gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);

          /* INSN might have dependencies from the instructions from
             several recovery blocks.  At this iteration we process those
             producers that reside in REC.  */
          if (BLOCK_FOR_INSN (pro) == rec)
            {
              dep_def _new_dep, *new_dep = &_new_dep;

              init_dep (new_dep, pro, twin, REG_DEP_TRUE);
              sd_add_dep (new_dep, false);
            }
        }

      process_insn_forw_deps_be_in_spec (insn, twin, ts);

      /* Remove all dependencies between INSN and insns in REC.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx pro = DEP_PRO (dep);

          if (BLOCK_FOR_INSN (pro) == rec)
            sd_delete_dep (sd_it);
          else
            sd_iterator_next (&sd_it);
        }
    }

  /* We couldn't have added the dependencies between INSN and TWINS earlier
     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
  while (twins)
    {
      rtx twin;

      twin = XEXP (twins, 0);

      {
        dep_def _new_dep, *new_dep = &_new_dep;

        init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
        sd_add_dep (new_dep, false);
      }

      twin = XEXP (twins, 1);
      free_INSN_LIST_node (twins);
      twins = twin;
    }

  calc_priorities (priorities_roots);
  VEC_free (rtx, heap, priorities_roots);
}
/* Extends and fills with zeros (only the new part) array pointed to by P.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
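/* Usage note (added comment, not in the original sources): the pass grows
   READY_TRY with this helper, e.g.

       ready_try = (char *) xrecalloc (ready_try, new_n, old_n,
                                       sizeof (*ready_try));

   so that only the freshly added tail is zeroed and the flags already set
   for existing entries are preserved.  */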
/* Helper function.
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
{
  edge e;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      e = find_fallthru_edge (pred->succs);

      if (e)
        {
          gcc_assert (e->dest == succ);
          return e;
        }
    }
  else
    {
      e = find_fallthru_edge (succ->preds);

      if (e)
        {
          gcc_assert (e->src == pred);
          return e;
        }
    }

  return NULL;
}
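/* Added note (not in the original sources): the function looks for the
   fallthru edge on whichever of PRED->succs and SUCC->preds is shorter,
   which keeps the scan cheap when one of the blocks has many edges; both
   searches must agree because a fallthru edge can only lead to the next
   basic block.  */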
/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
{
  rtx insn;

  /* The following is done to keep current_sched_info->next_tail non null.  */
  insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
  if (NEXT_INSN (insn) == 0
      || (!NOTE_P (insn)
          && !LABEL_P (insn)
          /* Don't emit a NOTE if it would end up before a BARRIER.  */
          && !BARRIER_P (NEXT_INSN (insn))))
    {
      rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
      /* Make insn appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
    }
}

/* Init per basic block data structures.  */
void
sched_init_bbs (void)
{
  sched_extend_bb ();
}
/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR->prev_bb;
  e = find_fallthru_edge_from (last);

  if (e)
    {
      /* We create two basic blocks:
         1. Single instruction block is inserted right after E->SRC
            and has jump to
         2. Empty block right before EXIT_BLOCK.
         Between these two blocks recovery blocks will be emitted.  */

      basic_block single, empty;
      rtx x, label;

      /* If the fallthrough edge to exit we've found is from the block we've
         created before, don't do anything more.  */
      if (last == after_recovery)
        return;

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
        {
          add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
          add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
        }

      single->count = last->count;
      empty->count = last->count;
      single->frequency = last->frequency;
      empty->frequency = last->frequency;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR,
                             EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);

      label = block_label (empty);
      x = emit_jump_insn_after (gen_jump (label), BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
        *before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
        fprintf (spec_info->dump,
                 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
                 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}
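/* Added sketch (not in the original sources) of the CFG produced above when
   a fallthru edge to EXIT exists:

       last --> single --jump--> empty --fallthru--> EXIT

   Recovery blocks created later are placed between SINGLE (before_recovery)
   and EMPTY (after_recovery).  */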
/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
  rtx label;
  rtx barrier;
  basic_block rec;

  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
             rec->index);

  return rec;
}
/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
                             basic_block second_bb)
{
  rtx label;
  rtx jump;
  int edge_flags;

  /* This is fixing of incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  make_edge (first_bb, rec, edge_flags);
  label = block_label (second_bb);
  jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (flag_reorder_blocks_and_partition
          && targetm_common.have_named_sections)
        {
          /* We don't need the same note for the check because
             any_condjump_p (check) == true.  */
          add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
        }
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check, that should be converted to branchy one.  */
static void
create_check_block_twin (rtx insn, bool mutate_p)
{
  basic_block rec;
  rtx label, check, twin;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
                  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR;
      label = NULL_RTX;
    }

  check = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* To have mem_reg alive at the beginning of second_bb,
         we emit check BEFORE insn, so insn after splitting
         insn will be at the beginning of second_bb, which will
         provide us with the correct life information.  */
      check = emit_jump_insn_before (check, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
             (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
        if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
          {
            struct _dep _dep2, *dep2 = &_dep2;

            init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

            sd_add_dep (dep2, true);
          }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
         (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR)
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR);

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
         check --TRUE--> producer  ??? or ANTI ???
         twin  --TRUE--> producer
         twin  --ANTI--> check

         If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
         check --ANTI--> producer
         twin  --ANTI--> producer
         twin  --ANTI--> check

         If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
         check ~~TRUE~~> producer
         twin  ~~TRUE~~> producer
         twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
        {
          gcc_assert (!mutate_p);
          ds &= ~BEGIN_SPEC;
        }

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR)
        {
          DEP_CON (new_dep) = twin;
          sd_add_dep (new_dep, false);
        }
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
          || mutate_p)
        /* We can delete this dep because we overcome it with
           BEGIN_SPECULATION.  */
        sd_delete_dep (sd_it);
      else
        sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
         BEGIN speculation.  */
      if (ts & BEGIN_DATA)
        fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
        fs = set_dep_weak (fs, BE_IN_CONTROL,
                           get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* Which types of dependencies should we use here is,
         generally, machine-dependent question...  But, for now,
         we use ANTI and OUTPUT dependencies.  */

      if (!mutate_p)
        {
          init_dep (new_dep, insn, check, REG_DEP_TRUE);
          sd_add_dep (new_dep, false);

          init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
          sd_add_dep (new_dep, false);
        }
      else
        {
          if (spec_info->dump)
            fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
                     (*current_sched_info->print_insn) (insn, 0));

          /* Remove all dependencies of the INSN.  */
          {
            sd_it = sd_iterator_start (insn, (SD_LIST_FORW
                                              | SD_LIST_BACK
                                              | SD_LIST_RES_BACK));
            while (sd_iterator_cond (&sd_it, &dep))
              sd_delete_dep (sd_it);
          }

          /* If former check (INSN) already was moved to the ready (or queue)
             list, add new check (CHECK) there too.  */
          if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
            try_ready (check);

          /* Remove old check from instruction stream and free its
             data.  */
          sched_remove_insn (insn);
        }

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      rtx_vec_t priorities_roots = NULL;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      VEC_free (rtx, heap, priorities_roots);
    }
}
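/* Added summary (not in the original sources): for a branchy check the
   function leaves behind

       first_bb:  ... check (conditional jump to rec) ...
       rec:       twin of INSN + jump to second_bb
       second_bb: the rest of the original block

   with edges first_bb->rec, first_bb->second_bb and rec->second_bb, while
   for a simple check no recovery block is created and the check insn
   doubles as the twin.  */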
/* Removes dependency between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx note, insn, jump, ready_list = 0;
  bitmap_head in_ready;
  rtx link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx consumer = DEP_CON (dep);

          if (BLOCK_FOR_INSN (consumer) != rec)
            {
              sd_delete_dep (sd_it);

              if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
                ready_list = alloc_INSN_LIST (consumer, ready_list);
            }
          else
            {
              gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

              sd_iterator_next (&sd_it);
            }
        }

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = XEXP (link, 1))
    try_ready (XEXP (link, 0));
  free_INSN_LIST_list (&ready_list);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
/* Change pattern of INSN to NEW_PAT.  */
void
sched_change_pattern (rtx insn, rtx new_pat)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);
  dfa_clear_single_insn_cache (insn);

  for (sd_it = sd_iterator_start (insn, (SD_LIST_FORW | SD_LIST_BACK
                                         | SD_LIST_RES_BACK));
       sd_iterator_cond (&sd_it, &dep);)
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx insn, rtx new_pat)
{
  sched_change_pattern (insn, new_pat);

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;
}
/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
              && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

static int
haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
              && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero, if scheduler is about to start with the fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
  if (!i)
    fprintf (sched_dump,
             ";;   ======================================================\n");
  else
    fprintf (sched_dump,
             ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
           ";;   -- basic block %d from %d to %d -- %s reload\n",
           bb->index, INSN_UID (head), INSN_UID (tail),
           (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
           ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume, that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx, last_basic_block);

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR)
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx prev, label, note, next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      NEXT_INSN (prev) = next;
      PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
        break;

      last = last->prev_bb;
    }
  while (1);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually a second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR
         && bb_header[first->index])
    {
      rtx prev, label, note, next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      NEXT_INSN (prev) = label;
      NEXT_INSN (note) = next;
      PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
              || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
              || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
          && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
        {
          /* If DEP doesn't contribute to priority then INSN itself should
             be added to priority roots.  */
          if (contributes_to_priority_p (dep))
            insn_is_root_p = false;

          INSN_PRIORITY_STATUS (pro) = -1;
          clear_priorities (pro, roots_ptr);
        }
    }

  if (insn_is_root_p)
    VEC_safe_push (rtx, heap, *roots_ptr, insn);
}

/* Recompute priorities of instructions, whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx insn;

  FOR_EACH_VEC_ELT (rtx, roots, i, insn)
    priority (insn);
}
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (dep_list_size (insn) == 0)
        {
          dep_def _new_dep, *new_dep = &_new_dep;

          init_dep (new_dep, insn, jump, REG_DEP_ANTI);
          sd_add_dep (new_dep, false);
        }
    }
  while (insn != jump);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}

/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}
#ifdef ENABLE_CHECKING
/* Helper function for check_cfg.
   Return nonzero, if edge vector pointed to by EL has edge with TYPE in
   its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}

/* Search back, starting at INSN, for an insn that is not a
   NOTE_INSN_VAR_LOCATION.  Don't search beyond HEAD, and return it if
   no such insn can be found.  */
static rtx
prev_non_location_insn (rtx insn, rtx head)
{
  while (insn != head && NOTE_P (insn)
         && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
    insn = PREV_INSN (insn);

  return insn;
}

/* Check few properties of CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL check from the beginning (till the end) of the
   instruction stream.  */
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
        gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
        gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
          || (NOTE_INSN_BASIC_BLOCK_P (head)
              && (!not_first
                  || (not_first && !LABEL_P (PREV_INSN (head))))))
        {
          gcc_assert (bb == 0);
          bb = BLOCK_FOR_INSN (head);
          if (bb != 0)
            gcc_assert (BB_HEAD (bb) == head);
          else
            /* This is the case of jump table.  See inside_basic_block_p ().  */
            gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
        }

      if (bb == 0)
        {
          gcc_assert (!inside_basic_block_p (head));
          head = NEXT_INSN (head);
        }
      else
        {
          gcc_assert (inside_basic_block_p (head)
                      || NOTE_P (head));
          gcc_assert (BLOCK_FOR_INSN (head) == bb);

          if (LABEL_P (head))
            {
              head = NEXT_INSN (head);
              gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
            }
          else
            {
              if (control_flow_insn_p (head))
                {
                  gcc_assert (prev_non_location_insn (BB_END (bb), head)
                              == head);

                  if (any_uncondjump_p (head))
                    gcc_assert (EDGE_COUNT (bb->succs) == 1
                                && BARRIER_P (NEXT_INSN (head)));
                  else if (any_condjump_p (head))
                    gcc_assert (/* Usual case.  */
                                (EDGE_COUNT (bb->succs) > 1
                                 && !BARRIER_P (NEXT_INSN (head)))
                                /* Or jump to the next instruction.  */
                                || (EDGE_COUNT (bb->succs) == 1
                                    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
                                        == JUMP_LABEL (head))));
                }
              if (BB_END (bb) == head)
                {
                  if (EDGE_COUNT (bb->succs) > 1)
                    gcc_assert (control_flow_insn_p (prev_non_location_insn
                                                     (head, BB_HEAD (bb)))
                                || has_edge_p (bb->succs, EDGE_COMPLEX));
                  bb = 0;
                }

              head = NEXT_INSN (head);
            }
        }

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}

#endif /* ENABLE_CHECKING */
/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
        sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  VEC_free (int, heap, sched_luids);
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx insn)
{
  return INSN_LUID (insn);
}
/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1
                 - VEC_length (haifa_insn_data_def, h_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
    {
      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
                             3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
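/* Added note (not in the original sources): growing H_I_D to
   3 * get_max_uid () / 2 instead of to the exact current maximum leaves
   headroom, so insns emitted later during scheduling (checks, twins,
   recovery jumps) usually do not force another reallocation.  */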
/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
    }
}
/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
        init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
    {
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
        {
          next = use->next_insn_use;
          free (use);
        }
    }
  VEC_free (haifa_insn_data_def, heap, h_i_d);
}
/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure_p)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}
/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx
sched_emit_insn (rtx pat)
{
  rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  VEC_safe_push (rtx, heap, scheduled_insns, insn);

  last_scheduled_insn = insn;
  return insn;
}
/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */
static rtx
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || INSN_CODE (insn) < 0
      || !INSN_P (insn)
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (INSN_CODE (insn) < 0
          || !INSN_P (insn)
          || !active_insn_p (insn))
        continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
        {
          /* Return ith element of ready.  */
          insn = ready_remove (ready, i);
          return insn;
        }
    }

  if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (INSN_CODE (insn) < 0
          || !INSN_P (insn)
          || !active_insn_p (insn))
        continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
        return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
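/* Added summary (not in the original sources): the selection order above is
   1) the first ready insn if it fits the dispatch window (or is not a real
   insn), 2) otherwise the first insn that does fit, 3) on a reported
   dispatch violation the first insn, 4) otherwise the first comparison
   insn, and finally the first insn as a last resort.  */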
/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Get the I-th element of the ready list.  */
rtx
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */