/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   lifetimes.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"

#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)    (h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)    (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)   (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)
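
/* For example, with max_insn_queue_index == 7, INVALID_TICK is -8 and
   MIN_TICK is -7; every valid tick is >= MIN_TICK, so a tick equal to
   INVALID_TICK unambiguously flags a value that must be recomputed.  */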

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1
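
/* Since ISSUE_POINTS is the constant 1, MAX_POINTS in max_issue
   degenerates to a count of candidate insns, and the multipass
   lookahead simply maximizes how many insns issue on one cycle;
   unequal point values would bias the search toward insns deemed
   more valuable.  */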

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
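
/* Summarizing the transitions described above:

     P --(last dependence resolved, latency still to elapse)--> Q
     P --(last dependence resolved, no stall needed)----------> R
     Q --(enough cycles have passed)--------------------------> R
     R --(picked by the scheduling loop)----------------------> S  */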

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations and of getting
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor in ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
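
/* Because max_insn_queue_index is a power of two minus one, the AND in
   NEXT_Q and NEXT_Q_AFTER is a cheap modulo.  E.g. with
   max_insn_queue_index == 7 the queue has 8 cycle slots, and

       NEXT_Q (7)          == ((7 + 1) & 7) == 0
       NEXT_Q_AFTER (6, 3) == ((6 + 3) & 7) == 1

   so advancing past the last slot wraps around to the first.  */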

/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
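
/* A worked example of the layout, assuming veclen == 8, first == 6 and
   n_ready == 3: the live window is vec[4] .. vec[6]; vec[6] (index 0
   for ready_element) holds the highest-priority insn and vec[4] the
   lowest-priority one, which is what ready_lastpos returns.  */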

/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)                        \
  (REG_P (x)                                            \
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS   \
        || (GET_CODE (x) == LO_SUM))                    \
       && (CONSTANT_P (XEXP (x, 0))                     \
           || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
        return TRAP_RISKY;
      else
        return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
        return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
        return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
        return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
        return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            {
              int tmp_class = may_trap_exp (XEXP (x, i), is_store);
              insn_class = WORST_CLASS (insn_class, tmp_class);
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); j++)
                {
                  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
                  insn_class = WORST_CLASS (insn_class, tmp_class);
                  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
                    break;
                }
            }
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
      return insn_class;
    }
}

/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (pat, 0, i));
          switch (code)
            {
            case CLOBBER:
              /* Test if it is a 'store'.  */
              tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
              break;
            case SET:
              /* Test if it is a store.  */
              tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
              if (tmp_class == TRAP_RISKY)
                break;
              /* Test if it is a load.  */
              tmp_class
                = WORST_CLASS (tmp_class,
                               may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
                                             0));
              break;
            case COND_EXEC:
            case TRAP_IF:
              tmp_class = TRAP_RISKY;
              break;
            default:;
            }
          insn_class = WORST_CLASS (insn_class, tmp_class);
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
        {
        case CLOBBER:
          /* Test if it is a 'store'.  */
          tmp_class = may_trap_exp (XEXP (pat, 0), 1);
          break;
        case SET:
          /* Test if it is a store.  */
          tmp_class = may_trap_exp (SET_DEST (pat), 1);
          if (tmp_class == TRAP_RISKY)
            break;
          /* Test if it is a load.  */
          tmp_class =
            WORST_CLASS (tmp_class,
                         may_trap_exp (SET_SRC (pat), 0));
          break;
        case COND_EXEC:
        case TRAP_IF:
          tmp_class = TRAP_RISKY;
          break;
        default:;
        }
      insn_class = tmp_class;
    }

  return insn_class;
}

/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
                     link, used);
}

/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */
HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
         understand.  We can't pass these directly to
         result_ready_cost or insn_default_latency because it will
         trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
        {
          INSN_COST (insn) = 0;
          return 0;
        }
      else
        {
          cost = insn_default_latency (insn);
          if (cost < 0)
            cost = 0;

          INSN_COST (insn) = cost;
        }
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
        {
          if (dep_type == REG_DEP_ANTI)
            cost = 0;
          else if (dep_type == REG_DEP_OUTPUT)
            {
              cost = (insn_default_latency (insn)
                      - insn_default_latency (used));
              if (cost <= 0)
                cost = 1;
            }
          else if (bypass_p (insn))
            cost = insn_latency (insn, used);
        }

      if (targetm.sched.adjust_cost_2)
        cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
        {
          if (targetm.sched.adjust_cost)
            cost = targetm.sched.adjust_cost (used, link, insn, cost);
        }

      if (cost < 0)
        cost = 0;
    }

  return cost;
}
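
/* Worked examples of the default cost rules above: an anti dependence
   costs 0; an output dependence between a 3-cycle producer and a
   1-cycle consumer costs 3 - 1 = 2 (clamped to a minimum of 1); a true
   dependence costs the producer's full default latency, subject to
   bypasses and the target's adjust_cost hooks.  */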

/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
        this_priority = insn_cost (insn, 0, 0);
      else
        {
          rtx prev_first, twin;
          basic_block rec;

          /* For recovery check instructions we calculate priority slightly
             different than that of normal instructions.  Instead of walking
             through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
             of each instruction in the corresponding recovery block.  */

          rec = RECOVERY_BLOCK (insn);
          if (!rec || rec == EXIT_BLOCK_PTR)
            {
              prev_first = PREV_INSN (insn);
              twin = insn;
            }
          else
            {
              prev_first = NEXT_INSN (BB_HEAD (rec));
              twin = PREV_INSN (BB_END (rec));
            }

          do
            {
              for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
                {
                  rtx next;
                  int next_priority;

                  next = XEXP (link, 0);

                  if (BLOCK_FOR_INSN (next) != rec)
                    {
                      /* Critical path is meaningful in block boundaries
                         only.  */
                      if (! (*current_sched_info->contributes_to_priority)
                          (next, insn)
                          /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
                             then speculative instructions will less likely be
                             scheduled.  That is because the priority of
                             their producers will increase, and, thus, the
                             producers will more likely be scheduled, thus,
                             resolving the dependence.  */
                          || ((current_sched_info->flags & DO_SPECULATION)
                              && (DEP_STATUS (link) & SPECULATIVE)
                              && !(spec_info->flags
                                   & COUNT_SPEC_IN_CRITICAL_PATH)))
                        continue;

                      next_priority = insn_cost1 (insn,
                                                  twin == insn ?
                                                  REG_NOTE_KIND (link) :
                                                  REG_DEP_ANTI,
                                                  twin == insn ? link : 0,
                                                  next) + priority (next);

                      if (next_priority > this_priority)
                        this_priority = next_priority;
                    }
                }

              twin = PREV_INSN (twin);
            }
          while (twin != prev_first);
        }
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
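
/* SCHED_SORT is applied to the live window of the ready vector, as in
   ready_sort below: SCHED_SORT (ready_lastpos (ready), ready->n_ready).
   A two-element window is handled by the cheap swap_sort insertion;
   longer windows fall through to qsort with rank_for_schedule as the
   comparator.  */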

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued the first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
        dw1 = dep_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
        dw2 = dep_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an insn with smaller contribution to registers-pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last schedule insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
        tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
        tmp_class = 1;
      else
        tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
        tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
        tmp2_class = 1;
      else
        tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
        return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
               (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
        {
          memmove (ready->vec + ready->veclen - ready->n_ready,
                   ready_lastpos (ready),
                   ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 1;
        }
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
        {
          if (ready->n_ready)
            /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
            memmove (ready->vec + ready->veclen - ready->n_ready - 1,
                     ready_lastpos (ready),
                     ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 2;
        }
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);
  return ready->vec[ready->first - index];
}
1054 for insn with the highest priority is 0, and the lowest priority
1057 HAIFA_INLINE
static rtx
1058 ready_remove (struct ready_list
*ready
, int index
)
1064 return ready_remove_first (ready
);
1065 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
1066 t
= ready
->vec
[ready
->first
- index
];
1068 for (i
= index
; i
< ready
->n_ready
; i
++)
1069 ready
->vec
[ready
->first
- i
] = ready
->vec
[ready
->first
- i
- 1];
1070 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}

/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_post_cycle_insn ());
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!RECOVERY_BLOCK (insn)
          || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* A check always has only one forward dependence (to the first insn
           in the recovery block), therefore, this will be executed only
           once.  */
        {
          gcc_assert (XEXP (link, 1) == 0);
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}

/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
        NEXT_INSN (prev) = next;
      if (next)
        PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
          && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
        {
          /* Insert the note at the end of the notes list.  */
          PREV_INSN (insn) = note_list;
          if (note_list)
            NEXT_INSN (note_list) = insn;
          note_list = insn;
        }

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
        {
          /* Delete the note from its current position.  */
          if (prev)
            NEXT_INSN (prev) = next;
          if (next)
            PREV_INSN (next) = prev;

          /* Record line-number notes so they can be reused.  */
          LINE_NOTE (insn) = insn;
        }
      else
        prev = insn;

      insn = next;
    }
  return insn;
}

/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;
          insn = unlink_line_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}

/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
             && INSN_UID (insn) < old_max_uid
             && (note = LINE_NOTE (insn)) != 0
             && note != line
             && (line == 0
#ifdef USE_MAPPED_LOCATION
                 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
                 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
                 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
                 ))
      {
        line = note;
        prev = PREV_INSN (insn);
        if (LINE_NOTE (note))
          {
            /* Re-use the original line-number note.  */
            LINE_NOTE (note) = 0;
            PREV_INSN (note) = prev;
            NEXT_INSN (prev) = note;
            PREV_INSN (insn) = note;
            NEXT_INSN (note) = insn;
            set_block_for_insn (note, BLOCK_FOR_INSN (insn));
          }
        else
          {
            added_notes++;
            new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
            NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
          }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}

/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
        /* If there are no active insns following, INSN is redundant.  */
        if (active_insn == 0)
          {
            notes++;
            SET_INSN_DELETED (insn);
          }
        /* If the line number is unchanged, LINE is redundant.  */
        else if (line
#ifdef USE_MAPPED_LOCATION
                 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
                 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
                 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
                 )
          {
            notes++;
            SET_INSN_DELETED (line);
            line = insn;
          }
        else
          line = insn;
        active_insn = 0;
      }
    else if (!((NOTE_P (insn)
                && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
               || (NONJUMP_INSN_P (insn)
                   && (GET_CODE (PATTERN (insn)) == USE
                       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;

          insn = unlink_other_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}

/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
        {
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
            return 1;
        }
      else
        return 1;
    }
  return 0;
}
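
/* For example, (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))
   weighs 0, because the destination register is already used by the
   source, while (set (reg:SI 101) (const_int 0)) weighs 1: a new
   register is born.  */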

/* Calculate INSN_REG_WEIGHT for all insns of a block.  */
static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of need
   to initialize new instruction in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
        {
          x = XVECEXP (PATTERN (insn), 0, j);
          reg_weight += find_set_reg_weight (x);
        }
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
          || REG_NOTE_KIND (x) == REG_UNUSED)
        reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}

/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      ready_add (ready, insn, false);
      if (sched_verbose >= 2)
        fprintf (sched_dump, "moving to ready without stalls\n");
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = XEXP (link, 1))
                {
                  insn = XEXP (link, 0);
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}

/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
            {
              rtx dep_link = 0;
              int dep_cost;

              if (!NOTE_P (prev_insn))
                {
                  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
                  if (dep_link)
                    {
                      dep_cost = insn_cost (prev_insn, dep_link, insn) ;
                      if (targetm.sched.is_costly_dependence (prev_insn, insn,
                                dep_link, dep_cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (!prev_insn)
            break;
          prev_insn = PREV_INSN (prev_insn);
        }
    }

  return true;
}

/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = XEXP (link, 1);
              insn = XEXP (link, 0);
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R... */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */

    } /* for stalls.. */

  return insns_removed;
}

/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}

/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}

/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((current_sched_info->flags & SCHED_RGN)
                          && RECOVERY_BLOCK (insn)
                          && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
                      || (current_sched_info->flags & SCHED_EBB));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          /* NT is needed for assertion below.  */
          rtx nt = current_sched_info->next_tail;

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
static int cycle_issued_insns;

/* The following variable value is maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
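
/* choose_ready (below) recomputes this limit as
   100 * lookahead**issue_rate; e.g. with a DFA lookahead of 4 and an
   issue rate of 2, the multipass search gives up after
   100 * 4 * 4 = 1600 tries on one cycle.  */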

/* The following value is value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is value of `issue_rate' at the last call of
   `schedule_block'.  */
static int cached_issue_rate = 0;
2031 /* The following function returns maximal (or close to maximal) number
2032 of insns which can be issued on the same cycle and one of which
2033 insns is insns with the best rank (the first insn in READY). To
2034 make this function tries different samples of ready insns. READY
2035 is current queue `ready'. Global array READY_TRY reflects what
2036 insns are already issued in this try. MAX_POINTS is the sum of points
2037 of all instructions in READY. The function stops immediatelly,
2038 if it reached the such a solution, that all instruction can be issued.
2039 INDEX will contain index of the best insn in READY. The following
2040 function is used only for first cycle multipass scheduling. */
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
  int n, i, all, n_ready, best, delay, tries_num, points = -1;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try[i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
        {
          if (top == choice_stack)
            break;
          if (best < top - choice_stack && ready_try[0])
            {
              best = top - choice_stack;
              *index = choice_stack[1].index;
              points = top->n;
              if (top->n == max_points || best == all)
                break;
            }
          i = top->index;
          ready_try[i] = 0;
          top--;
          memcpy (curr_state, top->state, dfa_state_size);
        }
      else if (!ready_try[i])
        {
          tries_num++;
          if (tries_num > max_lookahead_tries)
            break;
          insn = ready_element (ready, i);
          delay = state_transition (curr_state, insn);
          if (delay < 0)
            {
              if (state_dead_lock_p (curr_state))
                top->rest = 0;
              else
                top->rest--;
              n = top->n;
              if (memcmp (top->state, curr_state, dfa_state_size) != 0)
                n += ISSUE_POINTS (insn);
              top++;
              top->rest = cached_first_cycle_multipass_dfa_lookahead;
              top->index = i;
              top->n = n;
              memcpy (top->state, curr_state, dfa_state_size);
              ready_try[i] = 1;
              i = -1;
            }
        }
      i++;
    }
  while (top != choice_stack)
    {
      ready_try[top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\t\tChose insn : %s; points: %d/%d\n",
             (*current_sched_info->print_insn) (ready_element (ready, *index),
                                                0),
             points, max_points);

  return best;
}
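/* [Illustration, not part of the pass.]  The search above is a bounded
   depth-first enumeration over subsets of the ready list, kept in an
   explicit stack of choices.  Below is a self-contained sketch of the
   same idea with the DFA state abstracted away behind a caller-supplied
   predicate; every name ("sketch_", CAN_ISSUE) is hypothetical.  */
#if 0
#define SKETCH_MAX_READY 8	/* Assumes n_ready <= SKETCH_MAX_READY.  */

/* Return the size of the largest issuable subset found, trying at most
   MAX_TRIES extensions.  CAN_ISSUE (chosen, depth, i) says whether insn
   I can be issued after the DEPTH insns already recorded in CHOSEN.  */
static int
sketch_max_issue (int n_ready, int max_tries,
                  int (*can_issue) (const int *, int, int))
{
  int chosen[SKETCH_MAX_READY];
  int pos[SKETCH_MAX_READY + 1];	/* Next candidate per depth.  */
  int depth = 0, best = 0, tries = 0;

  pos[0] = 0;
  while (depth >= 0 && tries < max_tries)
    {
      int i;

      /* Look for the next viable candidate at this depth.  */
      for (i = pos[depth]; i < n_ready; i++)
        if (can_issue (chosen, depth, i))
          break;

      if (i == n_ready)
        {
          depth--;		/* Exhausted this level: backtrack.  */
          continue;
        }

      tries++;
      chosen[depth] = i;	/* Push the choice...  */
      pos[depth] = i + 1;	/* ...and remember where to resume.  */
      depth++;
      pos[depth] = 0;

      if (depth > best)
        best = depth;		/* Deepest stack == most insns issued.  */
      if (best == n_ready)
        break;			/* Everything issues; stop immediately.  */
    }
  return best;
}
#endif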
/* The following function chooses an insn from READY and modifies
   *N_READY and READY.  The following function is used only for first
   cycle multipass scheduling.  */
static rtx
choose_ready (struct ready_list *ready)
{
  int lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
    return ready_remove_first (ready);
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int more_issue, max_points, try_data = 1, try_control = 1;

      if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
        {
          cached_first_cycle_multipass_dfa_lookahead = lookahead;
          max_lookahead_tries = 100;
          for (i = 0; i < issue_rate; i++)
            max_lookahead_tries *= lookahead;
        }

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
        return ready_remove_first (ready);

      if (spec_info
          && spec_info->flags & (PREFER_NON_DATA_SPEC
                                 | PREFER_NON_CONTROL_SPEC))
        {
          for (i = 0, n = ready->n_ready; i < n; i++)
            {
              rtx x;
              ds_t s;

              x = ready_element (ready, i);
              s = TODO_SPEC (x);

              if (spec_info->flags & PREFER_NON_DATA_SPEC
                  && !(s & DATA_SPEC))
                {
                  try_data = 0;
                  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
                      || !try_control)
                    break;
                }

              if (spec_info->flags & PREFER_NON_CONTROL_SPEC
                  && !(s & CONTROL_SPEC))
                {
                  try_control = 0;
                  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
                    break;
                }
            }
        }

      if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
          || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
          || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
              && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
              (insn)))
        /* Discard speculative instruction that stands first in the ready
           list.  */
        {
          change_queue_index (insn, 1);
          return 0;
        }

      max_points = ISSUE_POINTS (insn);
      more_issue = issue_rate - cycle_issued_insns - 1;

      ready_try[0] = 0;

      for (i = 1; i < ready->n_ready; i++)
        {
          insn = ready_element (ready, i);
          ready_try[i]
            = (INSN_CODE (insn) < 0
               || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
               || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
               || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                   (insn)));

          if (!ready_try[i] && more_issue-- > 0)
            max_points += ISSUE_POINTS (insn);
        }

      if (max_issue (ready, &index, max_points) == 0)
        return ready_remove_first (ready);
      else
        return ready_remove (ready, index);
    }
}
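/* [Illustration, not part of the pass.]  MAX_POINTS above is the best
   score max_issue could possibly reach: the points of the first insn
   plus the points of every other unfiltered ready insn, capped by the
   issue slots left on this cycle.  A minimal sketch with hypothetical
   names; FILTERED plays the role of READY_TRY.  */
#if 0
static int
sketch_max_points (const int *points, const char *filtered,
                   int n_ready, int slots_left)
{
  int i;
  int max_points = points[0];	/* Insn 0 is always a candidate.  */

  for (i = 1; i < n_ready && slots_left > 0; i++)
    if (!filtered[i])
      {
        max_points += points[i];
        slots_left--;		/* One fewer slot for the rest.  */
      }
  return max_points;
}
#endif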
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

static void
schedule_block (basic_block *target_bb, int rgn_n_insns1)
{
  struct ready_list ready;
  int i, first_cycle_insn_p;
  rtx insn;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  added_recovery_block_p = false;

  if (sched_verbose)
    dump_new_block_header (0, *target_bb, head, tail);

  state_reset (curr_state);

  /* Allocate the ready list.  */
  readyp = &ready;
  ready.vec = NULL;
  ready_try = NULL;
  choice_stack = NULL;

  rgn_n_insns = -1;
  extend_ready (rgn_n_insns1 + 1);

  ready.first = ready.veclen - 1;
  ready.n_ready = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.md_init)
    targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;

  gcc_assert (NOTE_P (last_scheduled_insn)
              && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  sort_p = TRUE;
  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      do
        {
          start_clock_var = clock_var;

          clock_var++;

          advance_one_cycle ();

          /* Add to the ready list all pending insns that can be issued now.
             If there are no ready insns, increment clock until one
             is ready and add all pending insns at that point to the ready
             list.  */
          queue_to_ready (&ready);

          gcc_assert (ready.n_ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
              debug_ready_list (&ready);
            }
          advance -= clock_var - start_clock_var;
        }
      while (advance > 0);
      if (sort_p)
        {
          /* Sort the ready list based on priority.  */
          ready_sort (&ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
              debug_ready_list (&ready);
            }
        }

      /* Allow the target to reorder the list, typically for
         better instruction bundling.  */
      if (sort_p && targetm.sched.reorder
          && (ready.n_ready == 0
              || !SCHED_GROUP_P (ready_element (&ready, 0))))
        can_issue_more =
          targetm.sched.reorder (sched_dump, sched_verbose,
                                 ready_lastpos (&ready),
                                 &ready.n_ready, clock_var);
      else
        can_issue_more = issue_rate;

      first_cycle_insn_p = 1;
      cycle_issued_insns = 0;
      for (;;)
        {
          rtx insn;
          int cost;
          bool asm_p = false;

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
                       clock_var);
              debug_ready_list (&ready);
            }

          if (ready.n_ready == 0
              && can_issue_more
              && reload_completed)
            {
              /* Allow scheduling insns directly from the queue in case
                 there's nothing better to do (ready list is empty) but
                 there are still vacant dispatch slots in the current cycle.  */
              if (sched_verbose >= 6)
                fprintf (sched_dump, ";;\t\tSecond chance\n");
              memcpy (temp_state, curr_state, dfa_state_size);
              if (early_queue_to_ready (temp_state, &ready))
                ready_sort (&ready);
            }

          if (ready.n_ready == 0 || !can_issue_more
              || state_dead_lock_p (curr_state)
              || !(*current_sched_info->schedule_more_p) ())
            break;

          /* Select and remove the insn from the ready list.  */
          if (sort_p)
            insn = choose_ready (&ready);
          else
            insn = ready_remove_first (&ready);
          if (targetm.sched.dfa_new_cycle
              && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                              insn, last_clock_var,
                                              clock_var, &sort_p))
            /* SORT_P is used by the target to override sorting
               of the ready list.  This is needed when the target
               has modified its internal structures expecting that
               the insn will be issued next.  As we need the insn
               to have the highest priority (so it will be returned by
               the ready_remove_first call above), we invoke
               ready_add (&ready, insn, true).
               But, still, there is one issue: INSN can be later
               discarded by scheduler's front end through
               current_sched_info->can_schedule_ready_p, hence, won't
               be issued next.  */
            {
              ready_add (&ready, insn, true);
              break;
            }

          sort_p = TRUE;
          memcpy (temp_state, curr_state, dfa_state_size);
          if (recog_memoized (insn) < 0)
            {
              asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
                       || asm_noperands (PATTERN (insn)) >= 0);
              if (!first_cycle_insn_p && asm_p)
                /* This is an asm insn that we tried to issue on a cycle
                   other than the first.  Issue it on the next cycle.  */
                cost = 1;
              else
                /* A USE insn, or something else we don't need to
                   understand.  We can't pass these directly to
                   state_transition because it will trigger a
                   fatal error for unrecognizable insns.  */
                cost = 0;
            }
          else
            cost = state_transition (temp_state, insn);

          if (cost >= 1)
            {
              queue_insn (insn, cost);
              if (SCHED_GROUP_P (insn))
                {
                  advance = cost;
                  break;
                }

              continue;
            }

          if (current_sched_info->can_schedule_ready_p
              && ! (*current_sched_info->can_schedule_ready_p) (insn))
            /* We normally get here only if we don't want to move
               insn from the split block.  */
            {
              TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
              continue;
            }
          /* DECISION is made.  */

          if (TODO_SPEC (insn) & SPECULATIVE)
            generate_recovery_code (insn);

          if (control_flow_insn_p (last_scheduled_insn)
              /* This is used to switch basic blocks by request
                 from scheduler front-end (actually, sched-ebb.c only).
                 This is used to process blocks with single fallthru
                 edge.  If the succeeding block has a jump, it [the jump]
                 will try to move to the end of the current bb, thus
                 corrupting the CFG.  */
              || current_sched_info->advance_target_bb (*target_bb, insn))
            {
              *target_bb = current_sched_info->advance_target_bb
                (*target_bb, 0);

              if (sched_verbose)
                {
                  rtx x;

                  x = next_real_insn (last_scheduled_insn);
                  gcc_assert (x);
                  dump_new_block_header (1, *target_bb, x, tail);
                }

              last_scheduled_insn = bb_note (*target_bb);
            }

          /* Update counters, etc in the scheduler's front end.  */
          (*current_sched_info->begin_schedule_ready) (insn,
                                                       last_scheduled_insn);

          move_insn (insn);
          last_scheduled_insn = insn;
          if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
            {
              cycle_issued_insns++;
              memcpy (curr_state, temp_state, dfa_state_size);
            }

          if (targetm.sched.variable_issue)
            can_issue_more =
              targetm.sched.variable_issue (sched_dump, sched_verbose,
                                            insn, can_issue_more);
          /* A naked CLOBBER or USE generates no instruction, so do
             not count them against the issue rate.  */
          else if (GET_CODE (PATTERN (insn)) != USE
                   && GET_CODE (PATTERN (insn)) != CLOBBER)
            can_issue_more--;

          advance = schedule_insn (insn);

          /* After issuing an asm insn we should start a new cycle.  */
          if (advance == 0 && asm_p)
            advance = 1;
          if (advance != 0)
            break;

          first_cycle_insn_p = 0;

          /* Sort the ready list based on priority.  This must be
             redone here, as schedule_insn may have readied additional
             insns that will not be sorted correctly.  */
          if (ready.n_ready > 0)
            ready_sort (&ready);

          if (targetm.sched.reorder2
              && (ready.n_ready == 0
                  || !SCHED_GROUP_P (ready_element (&ready, 0))))
            {
              can_issue_more =
                targetm.sched.reorder2 (sched_dump, sched_verbose,
                                        ready.n_ready
                                        ? ready_lastpos (&ready) : NULL,
                                        &ready.n_ready, clock_var);
            }
        }
    }
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);
    }

  if (current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready);
  else
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
        {
          rtx x;

          x = ready_element (&ready, i);
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
        }
      if (q_size)
        for (i = 0; i <= max_insn_queue_index; i++)
          {
            rtx link;
            for (link = insn_queue[i]; link; link = XEXP (link, 1))
              {
                rtx x;

                x = XEXP (link, 0);
                QUEUE_INDEX (x) = QUEUE_NOWHERE;
                TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
              }
            free_INSN_LIST_list (&insn_queue[i]);
          }
    }

  if (!current_sched_info->queue_must_finish_empty
      || added_recovery_block_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
         ready) may not be correct for the insn in the subsequent
         blocks of the region.  We should use a correct value of
         `clock_var' or modify INSN_TICK.  It is better to keep
         clock_var value equal to 0 at the start of a basic block.
         Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }
#ifdef ENABLE_CHECKING
  /* After the reload the ia64 backend doesn't maintain BB_END, so
     if we want to check anything, better do it now.
     And it already clobbered previously scheduled code.  */
  if (reload_completed)
    check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
#endif

  if (targetm.sched.md_finish)
    targetm.sched.md_finish (sched_dump, sched_verbose);

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
     previously found among the insns.  Insert them at the beginning
     of the insns.  */
  if (note_list != 0)
    {
      basic_block head_bb = BLOCK_FOR_INSN (head);
      rtx note_head = note_list;

      while (PREV_INSN (note_head))
        {
          set_block_for_insn (note_head, head_bb);
          note_head = PREV_INSN (note_head);
        }
      /* In the above cycle we've missed this note:  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;
      head = note_head;
    }

  if (sched_verbose)
    {
      fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
               clock_var, INSN_UID (head));
      fprintf (sched_dump, ";; new tail = %d\n\n",
               INSN_UID (tail));
    }

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free (ready.vec);

  free (ready_try);

  for (i = 0; i <= rgn_n_insns; i++)
    free (choice_stack[i].state);
  free (choice_stack);
}
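/* [Illustration, not part of the pass.]  A runnable toy model of the
   cycle-by-cycle loop above: each "insn" has a release tick and a
   priority; every cycle we take the released insns, pick the highest
   priority ones, and issue up to an issue rate.  All names and data
   here are hypothetical.  */
#if 0
#include <stdio.h>

#define TOY_N_INSNS 6
#define TOY_ISSUE_RATE 2

struct toy_insn { int release_tick; int priority; int done; };

static struct toy_insn toy[TOY_N_INSNS] = {
  {0, 3, 0}, {0, 1, 0}, {1, 2, 0}, {2, 5, 0}, {2, 4, 0}, {3, 1, 0}
};

int
main (void)
{
  int clock, remaining = TOY_N_INSNS;

  for (clock = 0; remaining > 0; clock++)
    {
      int slots = TOY_ISSUE_RATE;

      while (slots > 0)
        {
          /* Pick the highest-priority ready insn (a linear "sort").  */
          int i, pick = -1;

          for (i = 0; i < TOY_N_INSNS; i++)
            if (!toy[i].done && toy[i].release_tick <= clock
                && (pick < 0 || toy[i].priority > toy[pick].priority))
              pick = i;
          if (pick < 0)
            break;		/* Nothing ready: advance the cycle.  */

          printf ("cycle %d: issue insn %d (prio %d)\n",
                  clock, pick, toy[pick].priority);
          toy[pick].done = 1;
          remaining--;
          slots--;
        }
    }
  return 0;
}
#endif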
/* Set_priorities: compute priority of each insn in the block.  */

static int
set_priorities (rtx head, rtx tail)
{
  rtx insn;
  int n_insn;
  int sched_max_insns_priority =
    current_sched_info->sched_max_insns_priority;
  rtx prev_head;

  if (head == tail && (! INSN_P (head)))
    return 0;

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
        continue;

      n_insn++;
      (void) priority (insn);

      if (INSN_PRIORITY_KNOWN (insn))
        sched_max_insns_priority =
          MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}
/* Next LUID to assign to an instruction.  */
static int luid;

/* Initialize some global state for the scheduler.  */

void
sched_init (void)
{
  /* Switch to working copy of sched_info.  */
  memcpy (&current_sched_info_var, current_sched_info,
          sizeof (current_sched_info_var));
  current_sched_info = &current_sched_info_var;

  /* Disable speculative loads in their presence if cc0 defined.  */
#ifdef HAVE_cc0
  flag_schedule_speculative_load = 0;
#endif

  /* Set dump and sched_verbose for the desired debugging output.  If no
     dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
     For -fsched-verbose=N, N>=10, print everything to stderr.  */
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
                ? stderr : dump_file);
  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);
      if (current_sched_info->flags & DO_SPECULATION)
        spec_info->weakness_cutoff =
          (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
      else
        /* So we won't read anything accidentally.  */
        spec_info = 0;
#ifdef ENABLE_CHECKING
      check_sched_flags ();
#endif
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = 0;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (cached_issue_rate != issue_rate)
    {
      cached_issue_rate = issue_rate;
      /* To invalidate max_lookahead_tries:  */
      cached_first_cycle_multipass_dfa_lookahead = 0;
    }
  for (i = 0; i < old_max_uid; i++)
    {
      h_i_d[i].todo_spec = HARD_DEP;
      h_i_d[i].queue_index = QUEUE_NOWHERE;
      h_i_d[i].tick = INVALID_TICK;
      h_i_d[i].inter_tick = INVALID_TICK;
    }

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_state_size = state_size ();
  curr_state = xmalloc (dfa_state_size);
  FOR_EACH_BB (b)
    for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
      {
        INSN_LUID (insn) = luid;

        /* Increment the next luid, unless this is a note.  We don't
           really need separate IDs for notes and we don't want to
           schedule differently depending on whether or not there are
           line-number notes, i.e., depending on whether or not we're
           generating debugging information.  */
        if (!NOTE_P (insn))
          ++luid;

        if (insn == BB_END (b))
          break;
      }

  init_dependency_caches (luid);

  init_alias_analysis ();

  old_last_basic_block = 0;
  extend_bb (0);

  if (current_sched_info->flags & USE_GLAT)
    init_glat ();

  /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
     removing death notes.  */
  FOR_EACH_BB_REVERSE (b)
    find_insn_reg_weight (b);

  if (targetm.sched.md_init_global)
    targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
#ifdef ENABLE_CHECKING
  /* This is used preferably for finding bugs in check_cfg () itself.  */
  check_cfg (0, 0);
#endif
}
/* Free global data used during insn scheduling.  */

void
sched_finish (void)
{
  free (h_i_d);
  free (curr_state);
  free_dependency_caches ();
  end_alias_analysis ();
  free (line_note_head);
  free_glat ();

  if (targetm.sched.md_finish_global)
    targetm.sched.md_finish_global (sched_dump, sched_verbose);

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
               ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-data-spec motions == %d\n",
               c, nr_begin_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-data-spec motions == %d\n",
               c, nr_be_in_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-control-spec motions == %d\n",
               c, nr_begin_control);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-control-spec motions == %d\n",
               c, nr_be_in_control);
    }

#ifdef ENABLE_CHECKING
  /* After reload ia64 backend clobbers CFG, so can't check anything.  */
  if (!reload_completed)
    check_cfg (0, 0);
#endif

  current_sched_info = NULL;
}
/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx head, rtx tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  bitmap_head processed;
  int next_clock = clock_var + 1;

  bitmap_initialize (&processed, 0);

  /* Iterate over the scheduled instructions and fix their INSN_TICKs and
     the INSN_TICKs of dependent instructions, so that INSN_TICKs are
     consistent across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
        {
          int tick;
          rtx link;

          tick = INSN_TICK (head);
          gcc_assert (tick >= MIN_TICK);

          /* Fix INSN_TICK of instruction from just scheduled block.  */
          if (!bitmap_bit_p (&processed, INSN_LUID (head)))
            {
              bitmap_set_bit (&processed, INSN_LUID (head));
              tick -= next_clock;

              if (tick < MIN_TICK)
                tick = MIN_TICK;

              INSN_TICK (head) = tick;
            }

          for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
            {
              rtx next;

              next = XEXP (link, 0);
              tick = INSN_TICK (next);

              if (tick != INVALID_TICK
                  /* If NEXT has its INSN_TICK calculated, fix it.
                     If not - it will be properly calculated from
                     scratch later in fix_tick_ready.  */
                  && !bitmap_bit_p (&processed, INSN_LUID (next)))
                {
                  bitmap_set_bit (&processed, INSN_LUID (next));
                  tick -= next_clock;

                  if (tick < MIN_TICK)
                    tick = MIN_TICK;

                  if (tick > INTER_TICK (next))
                    INTER_TICK (next) = tick;
                  else
                    tick = INTER_TICK (next);

                  INSN_TICK (next) = tick;
                }
            }
        }
    }
  bitmap_clear (&processed);
}
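/* [Illustration, not part of the pass.]  The normalization above exists
   because each block starts scheduling with clock_var == 0, so every
   tick recorded in the just-finished block must be rebased before the
   next block begins.  A minimal sketch of the rebasing step, with
   hypothetical parameter names:  */
#if 0
static void
sketch_rebase_tick (int *tick, int clock_at_block_end, int min_tick)
{
  /* NEXT_CLOCK in fix_inter_tick is clock_at_block_end + 1.  */
  *tick -= clock_at_block_end + 1;
  if (*tick < min_tick)
    *tick = min_tick;		/* Clamp, as the pass does.  */
}
#endif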
/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
static int
try_ready (rtx next)
{
  ds_t old_ts, *ts;
  rtx link;

  ts = &TODO_SPEC (next);
  old_ts = *ts;

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
              && ((old_ts & HARD_DEP)
                  || (old_ts & SPECULATIVE)));

  if (!(current_sched_info->flags & DO_SPECULATION))
    {
      if (!LOG_LINKS (next))
        *ts &= ~HARD_DEP;
    }
  else
    {
      *ts &= ~SPECULATIVE & ~HARD_DEP;

      link = LOG_LINKS (next);
      if (link)
        {
          /* LOG_LINKS are maintained sorted.
             So if DEP_STATUS of the first dep is SPECULATIVE,
             then all other deps are speculative too.  */
          if (DEP_STATUS (link) & SPECULATIVE)
            {
              /* Now we've got NEXT with speculative deps only.
                 1. Look at the deps to see what we have to do.
                 2. Check if we can do 'todo'.  */
              *ts = DEP_STATUS (link) & SPECULATIVE;
              while ((link = XEXP (link, 1)))
                *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);

              if (dep_weak (*ts) < spec_info->weakness_cutoff)
                /* Too few points.  */
                *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
            }
          else
            *ts |= HARD_DEP;
        }
    }

  if (*ts & HARD_DEP)
    gcc_assert (*ts == old_ts
                && QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    *ts = current_sched_info->new_ready (next, *ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
       have its original pattern or changed (speculative) one.  This is due
       to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
       has speculative pattern.

     We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((*ts & SPECULATIVE)
      /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
         need to change anything.  */
      && *ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));

      res = speculate_insn (next, *ts, &new_pat);

      switch (res)
        {
        case -1:
          /* It would be nice to change DEP_STATUS of all dependences,
             which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
             so we won't reanalyze anything.  */
          *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
          break;

        case 0:
          /* We follow the rule, that every speculative insn
             has non-null ORIG_PAT.  */
          if (!ORIG_PAT (next))
            ORIG_PAT (next) = PATTERN (next);
          break;

        case 1:
          if (!ORIG_PAT (next))
            /* If we are going to overwrite the original pattern of insn,
               save it.  */
            ORIG_PAT (next) = PATTERN (next);

          change_pattern (next, new_pat);
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* We need to restore pattern only if (*ts == 0), because otherwise it is
     either correct (*ts & SPECULATIVE),
     or we simply don't care (*ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
              || !RECOVERY_BLOCK (next)
              || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);

  if (*ts & HARD_DEP)
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
         control-speculative NEXT could have been discarded by sched-rgn.c
         (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);
      return -1;
    }
  else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
    /* We should change pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       ORIG_PAT field.  Except one case - simple checks have ORIG_PAT
       pat too, hence we also check for the RECOVERY_BLOCK.  */
    {
      change_pattern (next, ORIG_PAT (next));
      ORIG_PAT (next) = 0;
    }

  if (sched_verbose >= 2)
    {
      int s = TODO_SPEC (next);

      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
        {
          if (s & BEGIN_DATA)
            fprintf (spec_info->dump, "; data-spec;");
          if (s & BEGIN_CONTROL)
            fprintf (spec_info->dump, "; control-spec;");
          if (s & BE_IN_CONTROL)
            fprintf (spec_info->dump, "; in-control-spec;");
        }

      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx next)
{
  rtx link;
  int tick, delay;

  link = RESOLVED_DEPS (next);

  if (link)
    {
      int full_p;

      tick = INSN_TICK (next);
      /* If tick is not equal to INVALID_TICK, then update
         INSN_TICK of NEXT with the most recent resolved dependence
         cost.  Otherwise, recalculate from scratch.  */
      full_p = tick == INVALID_TICK;
      do
        {
          rtx pro;
          int tick1;

          pro = XEXP (link, 0);
          gcc_assert (INSN_TICK (pro) >= MIN_TICK);

          tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
          if (tick1 > tick)
            tick = tick1;
        }
      while ((link = XEXP (link, 1)) && full_p);
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
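/* [Illustration, not part of the pass.]  The loop above computes the
   tick at which an insn becomes ready: the maximum, over its resolved
   dependences, of the producer's tick plus the dependence cost.  A
   self-contained sketch with a hypothetical dependence record:  */
#if 0
struct sketch_dep { int producer_tick; int cost; };

static int
sketch_ready_tick (const struct sketch_dep *deps, int n_deps)
{
  int i, tick = 0;

  for (i = 0; i < n_deps; i++)
    {
      int t = deps[i].producer_tick + deps[i].cost;
      if (t > tick)
        tick = t;		/* Keep the latest producer.  */
    }
  return tick;
}
#endif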
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists entirely
   (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
              && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay);

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
        fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
        fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
        fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}
/* INSN is being scheduled.  Resolve the dependence between INSN and NEXT.  */
static void
resolve_dep (rtx next, rtx insn)
{
  rtx dep;

  INSN_DEP_COUNT (next)--;

  dep = remove_list_elem (insn, &LOG_LINKS (next));
  XEXP (dep, 1) = RESOLVED_DEPS (next);
  RESOLVED_DEPS (next) = dep;

  gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
              && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
}
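/* [Illustration, not part of the pass.]  resolve_dep moves one element
   from the pending list (LOG_LINKS) to the resolved list
   (RESOLVED_DEPS), reusing the node.  A generic C sketch of that move
   with a hypothetical node type:  */
#if 0
struct sketch_elem { void *datum; struct sketch_elem *next; };

static void
sketch_move_elem (void *datum, struct sketch_elem **from,
                  struct sketch_elem **to)
{
  struct sketch_elem **p, *e;

  for (p = from; *p; p = &(*p)->next)
    if ((*p)->datum == datum)
      break;
  e = *p;
  if (!e)
    return;			/* Not found; nothing to move.  */
  *p = e->next;			/* Unlink from the source list.  */
  e->next = *to;		/* Push onto the destination list.  */
  *to = e;
}
#endif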
/* Extend H_I_D data.  */
static void
extend_h_i_d (void)
{
  /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
     pseudos which do not cross calls.  */
  int new_max_uid = get_max_uid () + 1;

  h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
  old_max_uid = new_max_uid;

  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}
/* Extend READY, READY_TRY and CHOICE_STACK arrays.
   N_NEW_INSNS is the number of additional elements to allocate.  */
static void
extend_ready (int n_new_insns)
{
  int i;

  readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
  readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);

  ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
                         rgn_n_insns + 1, sizeof (char));

  rgn_n_insns += n_new_insns;

  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
                             rgn_n_insns + 1);

  for (i = rgn_n_insns; n_new_insns--; i--)
    choice_stack[i].state = xmalloc (dfa_state_size);
}
/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_global (rtx insn)
{
  gcc_assert (INSN_P (insn));
  /* These structures have scheduler scope.  */
  extend_h_i_d ();
  init_h_i_d (insn);

  extend_dependency_caches (1, 0);
}

/* Extends global and local scheduler structures to include information
   about just emitted INSN.  */
static void
extend_all (rtx insn)
{
  extend_global (insn);

  /* These structures have block scope.  */
  extend_ready (1);

  (*current_sched_info->add_remove_insn) (insn, 0);
}
/* Initialize h_i_d entry of the new INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  INSN_LUID (insn) = luid++;
  INSN_COST (insn) = -1;
  TODO_SPEC (insn) = HARD_DEP;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
  INSN_TICK (insn) = INVALID_TICK;
  INTER_TICK (insn) = INVALID_TICK;
  find_insn_reg_weight1 (insn);
}
/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}
/* Helper function.
   Tries to add speculative dependencies of type FS between instructions
   in LINK list and TWIN.  */
static void
process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
{
  for (; link; link = XEXP (link, 1))
    {
      ds_t ds;
      rtx consumer;

      consumer = XEXP (link, 0);

      ds = DEP_STATUS (link);

      if (/* If we want to create speculative dep.  */
          fs
          /* And we can do that because this is a true dep.  */
          && (ds & DEP_TYPES) == DEP_TRUE)
        {
          gcc_assert (!(ds & BE_IN_SPEC));

          if (/* If this dep can be overcome with 'begin speculation'.  */
              ds & BEGIN_SPEC)
            /* Then we have a choice: keep the dep 'begin speculative'
               or transform it into 'be in speculative'.  */
            {
              if (/* In try_ready we assert that if insn once became ready
                     it can be removed from the ready (or queue) list only
                     due to backend decision.  Hence we can't let the
                     probability of the speculative dep decrease.  */
                  dep_weak (ds) <= dep_weak (fs))
                /* Transform it to be in speculative.  */
                ds = (ds & ~BEGIN_SPEC) | fs;
            }
          else
            /* Mark the dep as 'be in speculative'.  */
            ds |= fs;
        }

      add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
    }
}
/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_DATA)
    nr_begin_data++;
  if (TODO_SPEC (insn) & BEGIN_CONTROL)
    nr_begin_control++;

  create_check_block_twin (insn, false);

  TODO_SPEC (insn) &= ~BEGIN_SPEC;
}
/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx insn)
{
  ds_t ts;
  rtx link, twins = NULL;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
    nr_be_in_data++;
  if (ts & BE_IN_CONTROL)
    nr_be_in_control++;

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy.  */
  for (link = LOG_LINKS (insn); link;)
    {
      rtx check;

      check = XEXP (link, 0);

      if (RECOVERY_BLOCK (check))
        {
          create_check_block_twin (check, true);
          link = LOG_LINKS (insn);
        }
      else
        link = XEXP (link, 1);
    }

  clear_priorities (insn);

  do
    {
      rtx link, check, twin;
      basic_block rec;

      link = LOG_LINKS (insn);
      gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
                  && (DEP_STATUS (link) & BE_IN_SPEC)
                  && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);

      check = XEXP (link, 0);
      gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
                  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
      extend_global (twin);

      RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);

      twins = alloc_INSN_LIST (twin, twins);

      /* Add dependences between TWIN and all appropriate
         instructions from REC.  */
      do
        {
          add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);

          do
            {
              link = XEXP (link, 1);
              if (link)
                {
                  check = XEXP (link, 0);
                  if (BLOCK_FOR_INSN (check) == rec)
                    break;
                }
              else
                break;
            }
          while (1);
        }
      while (link);

      process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);

      for (link = LOG_LINKS (insn); link;)
        {
          check = XEXP (link, 0);

          if (BLOCK_FOR_INSN (check) == rec)
            {
              delete_back_forw_dep (insn, check);
              link = LOG_LINKS (insn);
            }
          else
            link = XEXP (link, 1);
        }
    }
  while (LOG_LINKS (insn));

  /* We can't add the dependence between insn and twin earlier because
     that would make twin appear in the INSN_DEPEND (insn).  */
  while (twins)
    {
      rtx twin;

      twin = XEXP (twins, 0);
      calc_priorities (twin);
      add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);

      twin = XEXP (twins, 1);
      free_INSN_LIST_node (twins);
      twins = twin;
    }
}
/* Extends and fills with zeros (only the new part) array pointed to by P.  */
static void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
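/* [Illustration, not part of the pass.]  An equivalent standalone
   version using only the C library (xrecalloc above goes through GCC's
   XRESIZEVAR wrapper): grow a buffer and zero only the newly added
   tail.  The function name is hypothetical.  */
#if 0
#include <stdlib.h>
#include <string.h>

static void *
sketch_recalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  p = realloc (p, new_nmemb * size);
  if (p && new_nmemb > old_nmemb)
    /* Zero only the region beyond the old contents.  */
    memset ((char *) p + old_nmemb * size, 0,
            (new_nmemb - old_nmemb) * size);
  return p;
}
#endif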
/* Return the probability of speculation success for the speculation
   status DS.  */
static dw_t
dep_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
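/* [Illustration, not part of the pass.]  The weaknesses combined above
   are fixed-point probabilities scaled by MAX_DEP_WEAK, so the joint
   success probability is their product, renormalized once per extra
   factor.  A self-contained sketch with a hypothetical scale:  */
#if 0
#define SKETCH_MAX_WEAK 255	/* Stand-in for MAX_DEP_WEAK.  */

static int
sketch_joint_weak (const int *weak, int n_types)
{
  int i;
  long res = SKETCH_MAX_WEAK;	/* Probability 1.0 in fixed point.  */

  for (i = 0; i < n_types; i++)
    res = res * weak[i] / SKETCH_MAX_WEAK;
  return (int) res;
}
#endif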
/* Helper function.
   Find fallthru edge from PRED.  */
static edge
find_fallthru_edge (basic_block pred)
{
  edge e;
  edge_iterator ei;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      FOR_EACH_EDGE (e, ei, pred->succs)
        if (e->flags & EDGE_FALLTHRU)
          {
            gcc_assert (e->dest == succ);
            return e;
          }
    }
  else
    {
      FOR_EACH_EDGE (e, ei, succ->preds)
        if (e->flags & EDGE_FALLTHRU)
          {
            gcc_assert (e->src == pred);
            return e;
          }
    }

  return NULL;
}
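/* [Illustration, not part of the pass.]  A fall-through edge from PRED
   must go to the textually next block, so the function above searches
   whichever edge vector is shorter.  A runnable toy of the scan itself,
   with a hypothetical flag bit:  */
#if 0
#define SKETCH_EDGE_FALLTHRU 0x1

static int
sketch_find_fallthru (const int *edge_flags, int n_edges)
{
  int i;

  for (i = 0; i < n_edges; i++)
    if (edge_flags[i] & SKETCH_EDGE_FALLTHRU)
      return i;			/* Index of the fall-through edge.  */
  return -1;			/* No fall-through from this block.  */
}
#endif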
/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (void)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR->prev_bb;
  e = find_fallthru_edge (last);

  if (e)
    {
      /* We create two basic blocks:
         1. Single instruction block is inserted right after E->SRC
            and has jump to
         2. Empty block right before EXIT_BLOCK.
         Between these two blocks recovery blocks will be emitted.  */

      basic_block single, empty;
      rtx x, label;

      single = create_empty_bb (last);
      empty = create_empty_bb (single);

      single->count = last->count;
      empty->count = last->count;
      single->frequency = last->frequency;
      empty->frequency = last->frequency;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR,
                             EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);

      label = block_label (empty);
      x = emit_jump_insn_after (gen_jump (label), BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      extend_global (x);

      emit_barrier_after (x);

      add_block (empty, 0);
      add_block (single, 0);

      before_recovery = single;

      if (sched_verbose >= 2 && spec_info->dump)
        fprintf (spec_info->dump,
                 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
                 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}
/* Returns new recovery block.  */
static basic_block
create_recovery_block (void)
{
  rtx label;
  basic_block rec;

  added_recovery_block_p = true;

  if (!before_recovery)
    init_before_recovery ();

  label = gen_label_rtx ();
  gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
  label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));

  rec = create_basic_block (label, label, before_recovery);
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
             rec->index);

  before_recovery = rec;

  return rec;
}
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
static void
create_check_block_twin (rtx insn, bool mutate_p)
{
  basic_block rec;
  rtx label, check, twin, link;
  ds_t fs;

  gcc_assert (ORIG_PAT (insn)
              && (!mutate_p
                  || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
                      && !(TODO_SPEC (insn) & SPECULATIVE))));

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (insn))
    {
      rec = create_recovery_block ();
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR;
      label = 0;
    }

  /* Get pattern of the check insn.  */
  check = targetm.sched.gen_check (insn, label, mutate_p);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* To have mem_reg alive at the beginning of second_bb,
         we emit check BEFORE insn, so insn after splitting
         insn will be at the beginning of second_bb, which will
         provide us with the correct life information.  */
      check = emit_jump_insn_before (check, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check, insn);

  /* Extend data structures.  */
  extend_all (check);
  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
             (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR)
    {
      rtx link;

      for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
        if (DEP_STATUS (link) & DEP_OUTPUT)
          {
            RESOLVED_DEPS (check) =
              alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
            PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
          }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      extend_global (twin);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
         (TRUE | OUTPUT).  */
    }

  RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));

  if (rec != EXIT_BLOCK_PTR)
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx jump;
      edge e;
      int edge_flags;

      first_bb = BLOCK_FOR_INSN (check);
      e = split_block (first_bb, check);
      /* split_block emits note if *check == BB_END.  Probably it
         is better to rip that note off.  */
      gcc_assert (e->src == first_bb);
      second_bb = e->dest;

      /* This is fixing of incoming edge.  */
      /* ??? Which other flags should be specified?  */
      if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
        /* Partition type is the same, if it is "unpartitioned".  */
        edge_flags = EDGE_CROSSING;
      else
        edge_flags = 0;

      e = make_edge (first_bb, rec, edge_flags);

      add_block (second_bb, first_bb);

      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
      label = block_label (second_bb);
      jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
      JUMP_LABEL (jump) = label;
      LABEL_NUSES (label)++;
      extend_global (jump);

      if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
        /* Partition type is the same, if it is "unpartitioned".  */
        {
          /* Rewritten from cfgrtl.c.  */
          if (flag_reorder_blocks_and_partition
              && targetm.have_named_sections
              /*&& !any_condjump_p (jump)*/)
            /* any_condjump_p (jump) == false.
               We don't need the same note for the check because
               any_condjump_p (check) == true.  */
            {
              REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
                                                    NULL_RTX,
                                                    REG_NOTES (jump));
            }
          edge_flags = EDGE_CROSSING;
        }
      else
        edge_flags = 0;

      make_single_succ_edge (rec, second_bb, edge_flags);

      add_block (rec, EXIT_BLOCK_PTR);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
         check --TRUE--> producer  ??? or ANTI ???
         twin  --TRUE--> producer
         twin  --ANTI--> check

         If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
         check --ANTI--> producer
         twin  --ANTI--> producer
         twin  --ANTI--> check

         If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
         check ~~TRUE~~> producer
         twin  ~~TRUE~~> producer
         twin  --ANTI--> check  */

      ds = DEP_STATUS (link);

      if (ds & BEGIN_SPEC)
        {
          gcc_assert (!mutate_p);
          ds &= ~BEGIN_SPEC;
        }

      if (rec != EXIT_BLOCK_PTR)
        {
          add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
          add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
        }
      else
        add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
    }

  for (link = LOG_LINKS (insn); link;)
    if ((DEP_STATUS (link) & BEGIN_SPEC)
        || mutate_p)
      /* We can delete this dep only if we totally overcome it with
         BEGIN_SPECULATION.  */
      {
        delete_back_forw_dep (insn, XEXP (link, 0));
        link = LOG_LINKS (insn);
      }
    else
      link = XEXP (link, 1);

  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      if (ts & BEGIN_DATA)
        fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
        fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);

  if (rec != EXIT_BLOCK_PTR)
    /* Which types of dependencies should we use here is,
       generally, machine-dependent question...  But, for now,
       we use ANTI and OUTPUT dependencies.  */
    {
      if (!mutate_p)
        {
          add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
          add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
        }
      else
        {
          if (spec_info->dump)
            fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
                     (*current_sched_info->print_insn) (insn, 0));

          for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
            delete_back_forw_dep (XEXP (link, 0), insn);

          if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
            change_queue_index (insn, QUEUE_NOWHERE);

          sched_remove_insn (insn);
        }

      add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
    }
  else
    add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      clear_priorities (twin);
      calc_priorities (twin);
    }
}
/* Removes dependencies between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx note, insn, link, jump, ready_list = 0;
  bitmap_head in_ready;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      for (link = INSN_DEPEND (insn); link;)
        {
          rtx consumer;

          consumer = XEXP (link, 0);

          if (BLOCK_FOR_INSN (consumer) != rec)
            {
              delete_back_forw_dep (consumer, insn);

              if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
                {
                  ready_list = alloc_INSN_LIST (consumer, ready_list);
                  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
                }

              link = INSN_DEPEND (insn);
            }
          else
            {
              gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);

              link = XEXP (link, 1);
            }
        }

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = XEXP (link, 1))
    try_ready (XEXP (link, 0));
  free_INSN_LIST_list (&ready_list);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
/* The function saves line notes at the beginning of block B.  */
static void
associate_line_notes_with_blocks (basic_block b)
{
  rtx line;

  for (line = BB_HEAD (b); line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      {
        line_note_head[b->index] = line;
        break;
      }
  /* Do a forward search as well, since we won't get to see the first
     notes in a basic block.  */
  for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
    {
      if (INSN_P (line))
        break;
      if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
        line_note_head[b->index] = line;
    }
}
/* Changes pattern of the INSN to NEW_PAT.  */
static void
change_pattern (rtx insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);
  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;
  dfa_clear_single_insn_cache (insn);
}
/* Returns:
   -1 - can't speculate,
    0 - for speculation with REQUEST mode it is OK to use
        current instruction pattern,
    1 - need to change pattern for *NEW_PAT to be speculative.  */
static int
speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE));

  if (!NONJUMP_INSN_P (insn)
      || HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn)
      || side_effects_p (PATTERN (insn))
      || (request & spec_info->mask) != request)
    return -1;

  gcc_assert (!RECOVERY_BLOCK (insn));

  if (request & BE_IN_SPEC)
    {
      if (may_trap_p (PATTERN (insn)))
        return -1;

      if (!(request & BEGIN_SPEC))
        return 0;
    }

  return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
}
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero, if scheduler is about to start with the fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
  if (!i)
    fprintf (sched_dump,
             ";; ======================================================\n");
  else
    fprintf (sched_dump,
             ";; =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
           ";; -- basic block %d from %d to %d -- %s reload\n",
           bb->index, INSN_UID (head), INSN_UID (tail),
           (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
           ";; ======================================================\n");
  fprintf (sched_dump, "\n");
}
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide backward compatibility with the previous code, as target
   backends assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = xmalloc (last_basic_block * sizeof (*bb_header));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR)
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx prev, label, note, next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      NEXT_INSN (prev) = next;
      PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
        break;

      last = last->prev_bb;
    }
  while (1);
}
/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR
         && bb_header[first->index])
    {
      rtx prev, label, note, next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      NEXT_INSN (prev) = label;
      NEXT_INSN (note) = next;
      PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
/* Extend per basic block data structures of the scheduler.
   If BB is NULL, initialize structures for the whole CFG.
   Otherwise, initialize them for the just created BB.  */
static void
extend_bb (basic_block bb)
{
  rtx insn;

  if (write_symbols != NO_DEBUG)
    {
      /* Save-line-note-head:
         Determine the line-number at the start of each basic block.
         This must be computed and saved now, because after a basic block's
         predecessor has been scheduled, it is impossible to accurately
         determine the correct line number for the first insn of the block.  */
      line_note_head = xrecalloc (line_note_head, last_basic_block,
                                  old_last_basic_block,
                                  sizeof (*line_note_head));

      if (bb)
        associate_line_notes_with_blocks (bb);
      else
        FOR_EACH_BB (bb)
          associate_line_notes_with_blocks (bb);
    }

  old_last_basic_block = last_basic_block;

  if (current_sched_info->flags & USE_GLAT)
    {
      glat_start = xrealloc (glat_start,
                             last_basic_block * sizeof (*glat_start));
      glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
    }

  /* The following is done to keep current_sched_info->next_tail non null.  */

  insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
  if (NEXT_INSN (insn) == 0
      || (!NOTE_P (insn)
          && !LABEL_P (insn)
          /* Don't emit a NOTE if it would end up before a BARRIER.  */
          && !BARRIER_P (NEXT_INSN (insn))))
    {
      insn = emit_note_after (NOTE_INSN_DELETED, insn);
      /* Make insn appear outside BB.  */
      BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
    }
}
/* Add a basic block BB to extended basic block EBB.
   If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
   If EBB is NULL, then BB should be a new region.  */
void
add_block (basic_block bb, basic_block ebb)
{
  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
              && bb->il.rtl->global_live_at_start == 0
              && bb->il.rtl->global_live_at_end == 0);

  extend_bb (bb);

  glat_start[bb->index] = 0;
  glat_end[bb->index] = 0;

  if (current_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    current_sched_info->add_block (bb, ebb);
}
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (current_sched_info->flags & SCHED_EBB
              || (RECOVERY_BLOCK (jump)
                  && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       into jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (RECOVERY_BLOCK (jump)
              || RECOVERY_BLOCK (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  if (current_sched_info->fix_recovery_cfg)
    current_sched_info->fix_recovery_cfg
      (bb->index, jump_bb->index, jump_bb_next->index);
}
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;
  *succsp = 0;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;
}
/* Initialize GLAT (global_live_at_{start, end}) structures.
   GLAT structures are used to substitute global_live_{start, end}
   regsets during scheduling.  This is necessary to use such functions as
   split_block (), as they assume consistency of register live information.  */
static void
init_glat (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    init_glat1 (bb);
}

/* Helper function for init_glat.  */
static void
init_glat1 (basic_block bb)
{
  gcc_assert (bb->il.rtl->global_live_at_start != 0
              && bb->il.rtl->global_live_at_end != 0);

  glat_start[bb->index] = bb->il.rtl->global_live_at_start;
  glat_end[bb->index] = bb->il.rtl->global_live_at_end;

  if (current_sched_info->flags & DETACH_LIFE_INFO)
    {
      bb->il.rtl->global_live_at_start = 0;
      bb->il.rtl->global_live_at_end = 0;
    }
}
/* Attach reg_live_info back to basic blocks.
   Also save regsets that should not have been changed during scheduling,
   for checking purposes (see check_reg_live).  */
void
attach_life_info (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    attach_life_info1 (bb);
}

/* Helper function for attach_life_info.  */
static void
attach_life_info1 (basic_block bb)
{
  gcc_assert (bb->il.rtl->global_live_at_start == 0
              && bb->il.rtl->global_live_at_end == 0);

  if (glat_start[bb->index])
    {
      gcc_assert (glat_end[bb->index]);

      bb->il.rtl->global_live_at_start = glat_start[bb->index];
      bb->il.rtl->global_live_at_end = glat_end[bb->index];

      /* Make them NULL, so they won't be freed in free_glat.  */
      glat_start[bb->index] = 0;
      glat_end[bb->index] = 0;

#ifdef ENABLE_CHECKING
      if (bb->index < NUM_FIXED_BLOCKS
          || current_sched_info->region_head_or_leaf_p (bb, 0))
        {
          glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
          COPY_REG_SET (glat_start[bb->index],
                        bb->il.rtl->global_live_at_start);
        }

      if (bb->index < NUM_FIXED_BLOCKS
          || current_sched_info->region_head_or_leaf_p (bb, 1))
        {
          glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
          COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
        }
#endif
    }
  else
    {
      gcc_assert (!glat_end[bb->index]);

      bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
      bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
    }
}
/* Free GLAT information.  */
static void
free_glat (void)
{
#ifdef ENABLE_CHECKING
  if (current_sched_info->flags & DETACH_LIFE_INFO)
    {
      basic_block bb;

      FOR_ALL_BB (bb)
        {
          if (glat_start[bb->index])
            FREE_REG_SET (glat_start[bb->index]);
          if (glat_end[bb->index])
            FREE_REG_SET (glat_end[bb->index]);
        }
    }
#endif

  free (glat_start);
  free (glat_end);
}
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
4428 /* Clear priorities of all instructions, that are
4429 forward dependent on INSN. */
4431 clear_priorities (rtx insn
)
4435 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
4439 pro
= XEXP (link
, 0);
4440 if (INSN_PRIORITY_KNOWN (pro
))
4442 INSN_PRIORITY_KNOWN (pro
) = 0;
4443 clear_priorities (pro
);
/* Recompute priorities of instructions whose priorities might have been
   changed due to changes in INSN.  */
static void
calc_priorities (rtx insn)
{
  rtx link;

  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      rtx pro;

      pro = XEXP (link, 0);
      if (!INSN_PRIORITY_KNOWN (pro))
        {
          priority (pro);
          calc_priorities (pro);
        }
    }
}
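
/* clear_priorities and calc_priorities are meant to be used as a pair:
   once the dependence lists reachable from INSN have changed, a caller
   would typically write (a hypothetical sketch, not a call site from
   this file):

       clear_priorities (insn);
       calc_priorities (insn);

   Both walk the same LOG_LINKS chains, and the recursion terminates
   because each visit flips INSN_PRIORITY_KNOWN, so no producer is
   processed twice.  */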
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (!INSN_DEPEND (insn))
        add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
    }
  while (insn != jump);

  gcc_assert (LOG_LINKS (jump));
}
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

  return note;
}
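
/* A usage note (an assumption, not from this file): the function gives a
   stable insertion point at the very start of a block, e.g.

       emit_insn_after (pattern, bb_note (bb));

   which works because the basic block note follows at most one
   CODE_LABEL at the head of the block.  */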
#ifdef ENABLE_CHECKING
extern void debug_spec_status (ds_t);

/* Dump information about the dependence status S.  */
void
debug_spec_status (ds_t s)
{
  FILE *f = stderr;

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "\n");
}
/* Helper function for check_cfg.
   Return nonzero, if edge vector pointed to by EL has edge with TYPE in
   its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}
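
/* For example, check_cfg below uses

       has_edge_p (bb->succs, EDGE_COMPLEX)

   to tolerate blocks that do not end in a control flow insn but still
   have several successors (abnormal or EH edges).  */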
/* Check a few properties of CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL check from the beginning (till the end) of the
   instruction stream.  */
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
        gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
        gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
          || (NOTE_INSN_BASIC_BLOCK_P (head)
              && (!not_first
                  || (not_first && !LABEL_P (PREV_INSN (head))))))
        {
          gcc_assert (bb == 0);
          bb = BLOCK_FOR_INSN (head);
          if (bb != 0)
            gcc_assert (BB_HEAD (bb) == head);
          else
            /* This is the case of jump table.  See inside_basic_block_p ().  */
            gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
        }

      if (bb == 0)
        {
          gcc_assert (!inside_basic_block_p (head));
          head = NEXT_INSN (head);
        }
      else
        {
          gcc_assert (inside_basic_block_p (head)
                      || NOTE_P (head));
          gcc_assert (BLOCK_FOR_INSN (head) == bb);

          if (LABEL_P (head))
            {
              head = NEXT_INSN (head);
              gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
            }
          else
            {
              if (control_flow_insn_p (head))
                {
                  gcc_assert (BB_END (bb) == head);

                  if (any_uncondjump_p (head))
                    gcc_assert (EDGE_COUNT (bb->succs) == 1
                                && BARRIER_P (NEXT_INSN (head)));
                  else if (any_condjump_p (head))
                    gcc_assert (EDGE_COUNT (bb->succs) > 1
                                && !BARRIER_P (NEXT_INSN (head)));
                }
              if (BB_END (bb) == head)
                {
                  if (EDGE_COUNT (bb->succs) > 1)
                    gcc_assert (control_flow_insn_p (head)
                                || has_edge_p (bb->succs, EDGE_COMPLEX));
                  bb = 0;
                }

              head = NEXT_INSN (head);
            }
        }

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}
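
/* Per the head comment, check_cfg (NULL, NULL) verifies the whole
   instruction stream, while something like

       check_cfg (BB_HEAD (bb), BB_END (bb));

   (a hypothetical invocation) would restrict the checks to a single
   block.  */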
/* Perform a few consistency checks of flags in different data structures.  */
static void
check_sched_flags (void)
{
  unsigned int f = current_sched_info->flags;

  if (flag_sched_stalled_insns)
    gcc_assert (!(f & DO_SPECULATION));
  if (f & DO_SPECULATION)
    gcc_assert (!flag_sched_stalled_insns
                && (f & DETACH_LIFE_INFO)
                && spec_info
                && spec_info->mask);
  if (f & DETACH_LIFE_INFO)
    gcc_assert (f & USE_GLAT);
}
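
/* The asserts above encode an implication chain among the flags:
   DO_SPECULATION implies DETACH_LIFE_INFO, which in turn implies
   USE_GLAT, and DO_SPECULATION further excludes -fsched-stalled-insns
   and requires a nonzero spec_info->mask.  A caller enabling
   speculation therefore has to set all three, e.g. (hypothetically):

       current_sched_info->flags |= DO_SPECULATION | DETACH_LIFE_INFO
                                    | USE_GLAT;  */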
/* Check global_live_at_{start, end} regsets.
   If FATAL_P is TRUE, then abort execution at the first failure.
   Otherwise, print diagnostics to STDERR (this mode is for calling
   from the debugger).  */
void
check_reg_live (bool fatal_p)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      int i;

      i = bb->index;

      if (glat_start[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
                                   glat_start[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
            }
        }

      if (glat_end[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
                                   glat_end[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
            }
        }
    }
}
4693 #endif /* ENABLE_CHECKING */
4695 #endif /* INSN_SCHEDULING */