/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.
   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.
   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
       broken by
   2.  choose insn with least contribution to register pressure,
       ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
       broken by
   6.  choose insn with the least dependences upon the previously
       scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it,
       ties broken by
   8.  choose insn with lowest UID.
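
   For example (hypothetical insns): if two ready insns have the same
   path length to the end of the block (rule 1) and the same register
   pressure contribution (rule 2), but moving one of them would be an
   interblock motion while the other stays within its block, rule 3
   selects the in-block insn.  Only when rules 1-7 all tie does the
   arbitrary but deterministic UID comparison of rule 8 decide.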
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.
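
   For instance (hypothetical RTL), in

       (set (mem:SI (reg A)) (reg B))    ;; store through A
       (set (reg C) (mem:SI (reg A)))    ;; load through A

   the load must not be moved above the store (a true dependence
   through memory), whereas two loads from the same address may be
   reordered freely under the read-independence approximation above.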
   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward
   dependences INSN_FORW_DEPS for the purpose of forward list
   scheduling.
   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;
/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;
/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;

#define INSN_TICK(INSN)    (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)   (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1
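
/* Illustration (hypothetical value, not taken from this file): if
   max_insn_queue_index == 7, then MIN_TICK == -7 and INVALID_TICK == -8,
   so the "recalculate from scratch" marker can never collide with a
   valid tick.  */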
/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;
/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS
   of the unscheduled insns, i.e., those that are ready, queued, and
   pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
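
/* A sketch of the transitions described above:

       P -> R   last dependence resolved, no stall required
       P -> Q   last dependence resolved, must wait out latency
       Q -> R   enough cycles have passed (queue_to_ready)
       R -> S   best insn chosen in schedule_block

   S is final: once scheduled, an insn never moves again.  */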
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   of getting a result.  This is the longest time an insn may be
   queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
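
/* Illustration (hypothetical value): with max_insn_queue_index == 7 the
   queue has 8 slots and the macros wrap by masking, e.g.

       NEXT_Q (7)          == (7 + 1) & 7 == 0
       NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1

   so an insn delayed by 3 cycles from slot 6 is buffered in slot 1.  */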
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue, nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
/* The following variable holds the state of all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;
/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};

/* The pointer to the ready list.  */
static struct ready_list *readyp;
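
/* Illustration (hypothetical state): with veclen == 8, first == 7 and
   n_ready == 3, the ready insns occupy vec[5..7] in ascending priority
   order; ready_lastpos () below returns &vec[5], and the
   highest-priority insn sits at vec[7].  */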
/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;
static int may_trap_exp (rtx, int);

/* Nonzero iff the address is composed of at most one register.  */
#define CONST_BASED_ADDRESS_P(x)                        \
  (REG_P (x)                                            \
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS   \
        || (GET_CODE (x) == LO_SUM))                    \
       && (CONSTANT_P (XEXP (x, 0))                     \
           || CONSTANT_P (XEXP (x, 1)))))
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
        return TRAP_RISKY;
      else
        return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
        return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
        return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
        return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
        return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            {
              int tmp_class = may_trap_exp (XEXP (x, i), is_store);
              insn_class = WORST_CLASS (insn_class, tmp_class);
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); j++)
                {
                  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
                  insn_class = WORST_CLASS (insn_class, tmp_class);
                  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
                    break;
                }
            }
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
      return insn_class;
    }
}
/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */
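
/* For example (hypothetical insns): a volatile load classifies as
   IRISKY; a load through a single base register whose trap-freeness
   cannot yet be proved classifies as PFREE_CANDIDATE, to be resolved
   later; a division by a register classifies as TRAP_RISKY.  */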
static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (pat, 0, i));
          switch (code)
            {
            case CLOBBER:
              /* Test if it is a 'store'.  */
              tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
              break;
            case SET:
              /* Test if it is a store.  */
              tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
              if (tmp_class == TRAP_RISKY)
                break;
              /* Test if it is a load.  */
              tmp_class
                = WORST_CLASS (tmp_class,
                               may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
                                             0));
              break;
            case COND_EXEC:
            case TRAP_IF:
              tmp_class = TRAP_RISKY;
              break;
            default:
              ;
            }
          insn_class = WORST_CLASS (insn_class, tmp_class);
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
        {
        case CLOBBER:
          /* Test if it is a 'store'.  */
          tmp_class = may_trap_exp (XEXP (pat, 0), 1);
          break;
        case SET:
          /* Test if it is a store.  */
          tmp_class = may_trap_exp (SET_DEST (pat), 1);
          if (tmp_class == TRAP_RISKY)
            break;
          /* Test if it is a load.  */
          tmp_class =
            WORST_CLASS (tmp_class,
                         may_trap_exp (SET_SRC (pat), 0));
          break;
        case COND_EXEC:
        case TRAP_IF:
          tmp_class = TRAP_RISKY;
          break;
        default:;
        }
      insn_class = tmp_class;
    }

  return insn_class;
}
/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static rtx unlink_other_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (deps_list_t, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (void);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif
#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else
/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */
static rtx last_scheduled_insn;

/* Cached cost of the instruction.  Use insn_cost () below to get the
   cost of an insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost)
/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
HAIFA_INLINE int
insn_cost (rtx insn)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
         understand.  We can't pass these directly to
         result_ready_cost or insn_default_latency because it will
         trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
        {
          INSN_COST (insn) = 0;
          return 0;
        }
      else
        {
          cost = insn_default_latency (insn);
          if (cost < 0)
            cost = 0;

          INSN_COST (insn) = cost;
        }
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  rtx used = DEP_CON (link);
  int cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      rtx insn = DEP_PRO (link);
      enum reg_note dep_type = DEP_KIND (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
        {
          if (dep_type == REG_DEP_ANTI)
            cost = 0;
          else if (dep_type == REG_DEP_OUTPUT)
            {
              cost = (insn_default_latency (insn)
                      - insn_default_latency (used));
              if (cost <= 0)
                cost = 1;
            }
          else if (bypass_p (insn))
            cost = insn_latency (insn, used);
        }

      if (targetm.sched.adjust_cost != NULL)
        {
          /* This variable is used for backward compatibility with the
             targets.  */
          rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

          /* Make it self-cycled, so that if somebody tries to walk over
             this incomplete list they will be caught in an endless
             loop.  */
          XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

          /* Targets use only REG_NOTE_KIND of the link.  */
          PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_KIND (link));

          cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
                                            insn, cost);

          free_INSN_LIST_node (dep_cost_rtx_link);
        }

      if (cost < 0)
        cost = 0;
    }

  return cost;
}
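
/* Illustration (hypothetical latencies, ignoring any adjust_cost hook):
   if the producer insn has a default latency of 3 and the consumer of
   1, then a true dependence costs 3 cycles, an anti dependence costs
   0, and an output dependence costs 3 - 1 == 2 cycles (clamped below
   at 1).  */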
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  dep_link_t link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
        /* ??? We should set INSN_PRIORITY to insn_cost when an insn
           has some forward deps but all of them are ignored by the
           contributes_to_priority hook.  At the moment we set the
           priority of such an insn to 0.  */
        this_priority = insn_cost (insn);
      else
        {
          rtx prev_first, twin;
          basic_block rec;

          /* For recovery check instructions we calculate priority
             slightly differently than for normal instructions.
             Instead of walking through INSN_FORW_DEPS (check) list, we
             walk through INSN_FORW_DEPS list of each instruction in
             the corresponding recovery block.  */

          rec = RECOVERY_BLOCK (insn);
          if (!rec || rec == EXIT_BLOCK_PTR)
            {
              prev_first = PREV_INSN (insn);
              twin = insn;
            }
          else
            {
              prev_first = NEXT_INSN (BB_HEAD (rec));
              twin = PREV_INSN (BB_END (rec));
            }

          do
            {
              FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (twin))
                {
                  rtx next;
                  int next_priority;
                  dep_t dep = DEP_LINK_DEP (link);

                  next = DEP_CON (dep);

                  if (BLOCK_FOR_INSN (next) != rec)
                    {
                      int cost;

                      /* Critical path is meaningful in block boundaries
                         only.  */
                      if (! (*current_sched_info->contributes_to_priority)
                          (next, insn)
                          /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
                             then speculative instructions will be less
                             likely to be scheduled.  That is because
                             the priority of their producers will
                             increase, and, thus, the producers will
                             more likely be scheduled, thus, resolving
                             the dependence.  */
                          || ((current_sched_info->flags & DO_SPECULATION)
                              && (DEP_STATUS (dep) & SPECULATIVE)
                              && !(spec_info->flags
                                   & COUNT_SPEC_IN_CRITICAL_PATH)))
                        continue;

                      if (twin == insn)
                        cost = dep_cost (dep);
                      else
                        {
                          struct _dep _dep1, *dep1 = &_dep1;

                          init_dep (dep1, insn, next, REG_DEP_ANTI);

                          cost = dep_cost (dep1);
                        }

                      next_priority = cost + priority (next);

                      if (next_priority > this_priority)
                        this_priority = next_priority;
                    }
                }

              twin = PREV_INSN (twin);
            }
          while (twin != prev_first);
        }
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
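
/* Illustration (hypothetical chain): for insns A -> B -> C where each
   dependence costs 2 cycles and C has no forward deps with
   insn_cost (C) == 1, we get priority (C) == 1, priority (B) == 3 and
   priority (A) == 5, i.e. the length of the critical path from each
   insn to the end of the block.  */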
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  dep_link_t link1, link2;
  int tmp_class, tmp2_class;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
        dw1 = dep_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
        dw2 = dep_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an insn with smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link1
        = find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
                                         tmp);

      if (link1 == NULL || dep_cost (DEP_LINK_DEP (link1)) == 1)
        tmp_class = 3;
      else if (/* Data dependence.  */
               DEP_LINK_KIND (link1) == REG_DEP_TRUE)
        tmp_class = 1;
      else
        tmp_class = 2;

      link2
        = find_link_by_con_in_deps_list (INSN_FORW_DEPS (last_scheduled_insn),
                                         tmp2);

      if (link2 == NULL || dep_cost (DEP_LINK_DEP (link2)) == 1)
        tmp2_class = 3;
      else if (/* Data dependence.  */
               DEP_LINK_KIND (link2) == REG_DEP_TRUE)
        tmp2_class = 1;
      else
        tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
        return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  link1 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp));
  link2 = DEPS_LIST_FIRST (INSN_FORW_DEPS (tmp2));

  while (link1 != NULL && link2 != NULL)
    {
      link1 = DEP_LINK_NEXT (link1);
      link2 = DEP_LINK_NEXT (link2);
    }

  if (link1 != NULL && link2 == NULL)
    /* TMP (Y) has more insns that depend on it.  */
    return -1;
  if (link1 == NULL && link2 != NULL)
    /* TMP2 (X) has more insns that depend on it.  */
    return 1;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
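
/* Illustration (hypothetical priorities): for ready insns with
   priorities {2, 5, 3}, SCHED_SORT leaves the vector as {2, 3, 5}.
   rank_for_schedule returns a positive value when X is preferred, so
   qsort places preferred insns last, which is where
   ready_remove_first () pops them.  */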
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
               (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
        {
          memmove (ready->vec + ready->veclen - ready->n_ready,
                   ready_lastpos (ready),
                   ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 1;
        }
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
        {
          if (ready->n_ready)
            /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
            memmove (ready->vec + ready->veclen - ready->n_ready - 1,
                     ready_lastpos (ready),
                     ready->n_ready * sizeof (rtx));
          ready->first = ready->veclen - 2;
        }
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
                      targetm.sched.dfa_post_cycle_insn ());
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  dep_link_t link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0
              && deps_list_empty_p (INSN_BACK_DEPS (insn)));
  free_deps_list (INSN_BACK_DEPS (insn));

  /* Now we can free INSN_RESOLVED_BACK_DEPS list.  */
  delete_deps_list (INSN_RESOLVED_BACK_DEPS (insn));

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
    {
      rtx next = DEP_LINK_CON (link);

      /* Resolve the dependence between INSN and NEXT.  */

      INSN_DEP_COUNT (next)--;

      move_dep_link (DEP_NODE_BACK (DEP_LINK_NODE (link)),
                     INSN_RESOLVED_BACK_DEPS (next));

      gcc_assert ((INSN_DEP_COUNT (next) == 0)
                  == deps_list_empty_p (INSN_BACK_DEPS (next)));

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (DEP_LINK_NEXT (link) == NULL);
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
        NEXT_INSN (prev) = next;
      if (next)
        PREV_INSN (next) = prev;

      if (bb)
        {
          /* Basic block can begin with either LABEL or
             NOTE_INSN_BASIC_BLOCK.  */
          gcc_assert (BB_HEAD (bb) != insn);

          /* Check if we are removing last insn in the BB.  */
          if (BB_END (bb) == insn)
            BB_END (bb) = prev;
        }

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
          && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
        {
          /* Insert the note at the end of the notes list.  */
          PREV_INSN (insn) = note_list;
          if (note_list)
            NEXT_INSN (note_list) = insn;
          note_list = insn;
        }

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
        {
          prev = insn;

          insn = unlink_other_notes (insn, next_tail);

          gcc_assert (prev != tail && prev != head && insn != next_tail);
        }
    }
}
/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
        {
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
            return 1;
          else
            return 0;
        }
      return 1;
    }
  return 0;
}
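
/* For example (hypothetical insns): (set (reg A) (reg B)) weighs 1,
   since a new value is born in A, while (set (reg A) (plus (reg A)
   (const_int 1))) weighs 0, because the destination is already used
   by the source and no additional register becomes live.  */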
/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}
/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
        {
          x = XVECEXP (PATTERN (insn), 0, j);
          reg_weight += find_set_reg_weight (x);
        }
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
          || REG_NOTE_KIND (x) == REG_UNUSED)
        reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
      if (!reload_completed
          && ready->n_ready > MAX_SCHED_READY_INSNS
          && !SCHED_GROUP_P (insn))
        {
          if (sched_verbose >= 2)
            fprintf (sched_dump, "requeued because ready full\n");
          queue_insn (insn, 1);
        }
      else
        {
          ready_add (ready, insn, false);
          if (sched_verbose >= 2)
            fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = XEXP (link, 1))
                {
                  insn = XEXP (link, 0);
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n",
                             stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
            {
              int cost;

              if (!NOTE_P (prev_insn))
                {
                  dep_link_t dep_link;

                  dep_link = (find_link_by_con_in_deps_list
                              (INSN_FORW_DEPS (prev_insn), insn));

                  if (dep_link)
                    {
                      dep_t dep = DEP_LINK_DEP (dep_link);

                      cost = dep_cost (dep);

                      if (targetm.sched.is_costly_dependence (dep, cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (!prev_insn)
            break;
          prev_insn = PREV_INSN (prev_insn);
        }
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
             (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = XEXP (link, 1);
              insn = XEXP (link, 0);
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R...  */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump,
                                 ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn)
{
  rtx last = last_scheduled_insn;

  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((current_sched_info->flags & SCHED_RGN)
                          && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                      || (current_sched_info->flags & SCHED_EBB));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          /* NT is needed for assertion below.  */
          rtx nt = current_sched_info->next_tail;

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      set_block_for_insn (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  reemit_notes (insn);

  SCHED_GROUP_P (insn) = 0;
}
/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of remaining insns whose issue we should still try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;
/* The following variable value is the number of essential insns issued
   on the current cycle.  An insn is essential if it changes the
   processor's state.  */
static int cycle_issued_insns;
/* The following variable value is the maximal number of tries of
   issuing insns for the first cycle multipass insn scheduling.  We
   define this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would
   not need this constraint if all real insns (with non-negative codes)
   had reservations, because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the
   constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue
   length.  */
static int max_lookahead_tries;

/* The following value is the value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is the value of `issue_rate' at the last call of
   `sched_init'.  */
static int cached_issue_rate = 0;
/* The following function returns the maximal (or close to maximal)
   number of insns which can be issued on the same cycle, one of which
   is the insn with the best rank (the first insn in READY).  To do
   this, the function tries different samples of ready insns.  READY is
   the current queue `ready'.  The global array READY_TRY reflects
   which insns are already issued in this try.  MAX_POINTS is the sum
   of points of all instructions in READY.  The function stops
   immediately if it has found a solution in which all instructions can
   be issued.  INDEX will contain the index of the best insn in READY.
   The following function is used only for first cycle multipass
   scheduling.  */
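
/* Illustration (hypothetical parameters): with a lookahead of 4 and
   issue_rate == 2, choose_ready () below sets max_lookahead_tries to
   100 * 4 * 4 == 1600, bounding the search even when an incomplete DFA
   description would otherwise let it grow with the queue length.  */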
1888 static int
1889 max_issue (struct ready_list *ready, int *index, int max_points)
1891 int n, i, all, n_ready, best, delay, tries_num, points = -1;
1892 struct choice_entry *top;
1893 rtx insn;
1895 best = 0;
1896 memcpy (choice_stack->state, curr_state, dfa_state_size);
1897 top = choice_stack;
1898 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1899 top->n = 0;
1900 n_ready = ready->n_ready;
1901 for (all = i = 0; i < n_ready; i++)
1902 if (!ready_try [i])
1903 all++;
1904 i = 0;
1905 tries_num = 0;
1906 for (;;)
1908 if (top->rest == 0 || i >= n_ready)
1910 if (top == choice_stack)
1911 break;
1912 if (best < top - choice_stack && ready_try [0])
1914 best = top - choice_stack;
1915 *index = choice_stack [1].index;
1916 points = top->n;
1917 if (top->n == max_points || best == all)
1918 break;
1920 i = top->index;
1921 ready_try [i] = 0;
1922 top--;
1923 memcpy (curr_state, top->state, dfa_state_size);
1925 else if (!ready_try [i])
1927 tries_num++;
1928 if (tries_num > max_lookahead_tries)
1929 break;
1930 insn = ready_element (ready, i);
1931 delay = state_transition (curr_state, insn);
1932 if (delay < 0)
1934 if (state_dead_lock_p (curr_state))
1935 top->rest = 0;
1936 else
1937 top->rest--;
1938 n = top->n;
1939 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
1940 n += ISSUE_POINTS (insn);
1941 top++;
1942 top->rest = cached_first_cycle_multipass_dfa_lookahead;
1943 top->index = i;
1944 top->n = n;
1945 memcpy (top->state, curr_state, dfa_state_size);
1946 ready_try [i] = 1;
1947 i = -1;
1950 i++;
1952 while (top != choice_stack)
1954 ready_try [top->index] = 0;
1955 top--;
1957 memcpy (curr_state, choice_stack->state, dfa_state_size);
1959 if (sched_verbose >= 4)
1960 fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
1961 (*current_sched_info->print_insn) (ready_element (ready, *index),
1962 0),
1963 points, max_points);
1965 return best;
1968 /* The following function chooses an insn from READY, removes it from
1969 the list and returns it, or returns 0 if the choice was deferred. The
1970 function is used only for first cycle multipass scheduling. */
1972 static rtx
1973 choose_ready (struct ready_list *ready)
1975 int lookahead = 0;
1977 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
1978 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
1979 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
1980 return ready_remove_first (ready);
1981 else
1983 /* Try to choose the best insn. */
1984 int index = 0, i, n;
1985 rtx insn;
1986 int more_issue, max_points, try_data = 1, try_control = 1;
1988 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
1990 cached_first_cycle_multipass_dfa_lookahead = lookahead;
1991 max_lookahead_tries = 100;
1992 for (i = 0; i < issue_rate; i++)
1993 max_lookahead_tries *= lookahead;
1995 insn = ready_element (ready, 0);
1996 if (INSN_CODE (insn) < 0)
1997 return ready_remove_first (ready);
1999 if (spec_info
2000 && spec_info->flags & (PREFER_NON_DATA_SPEC
2001 | PREFER_NON_CONTROL_SPEC))
2003 for (i = 0, n = ready->n_ready; i < n; i++)
2005 rtx x;
2006 ds_t s;
2008 x = ready_element (ready, i);
2009 s = TODO_SPEC (x);
2011 if (spec_info->flags & PREFER_NON_DATA_SPEC
2012 && !(s & DATA_SPEC))
2014 try_data = 0;
2015 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2016 || !try_control)
2017 break;
2020 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2021 && !(s & CONTROL_SPEC))
2023 try_control = 0;
2024 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2025 break;
2030 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2031 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2032 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2033 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2034 (insn)))
2035 /* Discard the speculative instruction that stands first in the ready
2036 list. */
2038 change_queue_index (insn, 1);
2039 return 0;
2042 max_points = ISSUE_POINTS (insn);
2043 more_issue = issue_rate - cycle_issued_insns - 1;
2045 for (i = 1; i < ready->n_ready; i++)
2047 insn = ready_element (ready, i);
2048 ready_try [i]
2049 = (INSN_CODE (insn) < 0
2050 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2051 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2052 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2053 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2054 (insn)));
2056 if (!ready_try [i] && more_issue-- > 0)
2057 max_points += ISSUE_POINTS (insn);
2060 if (max_issue (ready, &index, max_points) == 0)
2061 return ready_remove_first (ready);
2062 else
2063 return ready_remove (ready, index);
2067 /* Use forward list scheduling to rearrange insns of block pointed to by
2068 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2069 region. */
2071 void
2072 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2074 struct ready_list ready;
2075 int i, first_cycle_insn_p;
2076 int can_issue_more;
2077 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2078 int sort_p, advance, start_clock_var;
2080 /* Head/tail info for this block. */
2081 rtx prev_head = current_sched_info->prev_head;
2082 rtx next_tail = current_sched_info->next_tail;
2083 rtx head = NEXT_INSN (prev_head);
2084 rtx tail = PREV_INSN (next_tail);
2086 /* We used to have code to avoid getting parameters moved from hard
2087 argument registers into pseudos.
2089 However, it was removed when it proved to be of marginal benefit
2090 and caused problems because schedule_block and compute_forward_dependences
2091 had different notions of what the "head" insn was. */
2093 gcc_assert (head != tail || INSN_P (head));
2095 added_recovery_block_p = false;
2097 /* Debug info. */
2098 if (sched_verbose)
2099 dump_new_block_header (0, *target_bb, head, tail);
2101 state_reset (curr_state);
2103 /* Allocate the ready list. */
2104 readyp = &ready;
2105 ready.vec = NULL;
2106 ready_try = NULL;
2107 choice_stack = NULL;
2109 rgn_n_insns = -1;
2110 extend_ready (rgn_n_insns1 + 1);
2112 ready.first = ready.veclen - 1;
2113 ready.n_ready = 0;
2115 /* TEMP_STATE is used for first cycle multipass scheduling. */
2116 temp_state = alloca (dfa_state_size);
2118 if (targetm.sched.md_init)
2119 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2121 /* We start inserting insns after PREV_HEAD. */
2122 last_scheduled_insn = prev_head;
2124 gcc_assert (NOTE_P (last_scheduled_insn)
2125 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2127 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2128 queue. */
2129 q_ptr = 0;
2130 q_size = 0;
2132 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2133 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
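/* A sketch of the queue discipline assumed here: INSN_QUEUE is a
   circular buffer of max_insn_queue_index + 1 insn lists.  An insn
   queued with cost N goes into the bucket NEXT_Q_AFTER (q_ptr, N) and
   is moved back to the ready list by queue_to_ready once the clock
   has advanced N cycles. */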
2135 /* Start just before the beginning of time. */
2136 clock_var = -1;
2138 /* We need the queue and ready lists and clock_var to be initialized
2139 in try_ready () (which is called through init_ready_list ()). */
2140 (*current_sched_info->init_ready_list) ();
2142 /* The algorithm is O(n^2) in the number of ready insns at any given
2143 time in the worst case. Before reload we are more likely to have
2144 big lists so truncate them to a reasonable size. */
2145 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2147 ready_sort (&ready);
2149 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2150 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2151 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2152 break;
2154 if (sched_verbose >= 2)
2156 fprintf (sched_dump,
2157 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2158 fprintf (sched_dump,
2159 ";;\t\t before reload => truncated to %d insns\n", i);
2162 /* Delay all insns past it for 1 cycle. */
2163 while (i < ready.n_ready)
2164 queue_insn (ready_remove (&ready, i), 1);
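/* Note: the loop above stops at the first insn that is not part of a
   SCHED_GROUP chain, so the truncation never separates an insn from
   the group it must be scheduled together with. */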
2167 /* Now we can restore basic block notes and maintain precise cfg. */
2168 restore_bb_notes (*target_bb);
2170 last_clock_var = -1;
2172 advance = 0;
2174 sort_p = TRUE;
2175 /* Loop until all the insns in BB are scheduled. */
2176 while ((*current_sched_info->schedule_more_p) ())
2180 start_clock_var = clock_var;
2182 clock_var++;
2184 advance_one_cycle ();
2186 /* Add to the ready list all pending insns that can be issued now.
2187 If there are no ready insns, increment clock until one
2188 is ready and add all pending insns at that point to the ready
2189 list. */
2190 queue_to_ready (&ready);
2192 gcc_assert (ready.n_ready);
2194 if (sched_verbose >= 2)
2196 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2197 debug_ready_list (&ready);
2199 advance -= clock_var - start_clock_var;
2201 while (advance > 0);
2203 if (sort_p)
2205 /* Sort the ready list based on priority. */
2206 ready_sort (&ready);
2208 if (sched_verbose >= 2)
2210 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2211 debug_ready_list (&ready);
2215 /* Allow the target to reorder the list, typically for
2216 better instruction bundling. */
2217 if (sort_p && targetm.sched.reorder
2218 && (ready.n_ready == 0
2219 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2220 can_issue_more =
2221 targetm.sched.reorder (sched_dump, sched_verbose,
2222 ready_lastpos (&ready),
2223 &ready.n_ready, clock_var);
2224 else
2225 can_issue_more = issue_rate;
2227 first_cycle_insn_p = 1;
2228 cycle_issued_insns = 0;
2229 for (;;)
2231 rtx insn;
2232 int cost;
2233 bool asm_p = false;
2235 if (sched_verbose >= 2)
2237 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2238 clock_var);
2239 debug_ready_list (&ready);
2242 if (ready.n_ready == 0
2243 && can_issue_more
2244 && reload_completed)
2246 /* Allow scheduling insns directly from the queue in case
2247 there's nothing better to do (ready list is empty) but
2248 there are still vacant dispatch slots in the current cycle. */
2249 if (sched_verbose >= 6)
2250 fprintf (sched_dump,";;\t\tSecond chance\n");
2251 memcpy (temp_state, curr_state, dfa_state_size);
2252 if (early_queue_to_ready (temp_state, &ready))
2253 ready_sort (&ready);
2256 if (ready.n_ready == 0 || !can_issue_more
2257 || state_dead_lock_p (curr_state)
2258 || !(*current_sched_info->schedule_more_p) ())
2259 break;
2261 /* Select and remove the insn from the ready list. */
2262 if (sort_p)
2264 insn = choose_ready (&ready);
2265 if (!insn)
2266 continue;
2268 else
2269 insn = ready_remove_first (&ready);
2271 if (targetm.sched.dfa_new_cycle
2272 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2273 insn, last_clock_var,
2274 clock_var, &sort_p))
2275 /* SORT_P is used by the target to override sorting
2276 of the ready list. This is needed when the target
2277 has modified its internal structures expecting that
2278 the insn will be issued next. As we need the insn
2279 to have the highest priority (so it will be returned by
2280 the ready_remove_first call above), we invoke
2281 ready_add (&ready, insn, true).
2282 But, still, there is one issue: INSN can later be
2283 discarded by the scheduler's front end through
2284 current_sched_info->can_schedule_ready_p and hence won't
2285 be issued next. */
2287 ready_add (&ready, insn, true);
2288 break;
2291 sort_p = TRUE;
2292 memcpy (temp_state, curr_state, dfa_state_size);
2293 if (recog_memoized (insn) < 0)
2295 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2296 || asm_noperands (PATTERN (insn)) >= 0);
2297 if (!first_cycle_insn_p && asm_p)
2298 /* This is an asm insn that we tried to issue on a cycle
2299 other than the first. Issue it on the next cycle. */
2300 cost = 1;
2301 else
2302 /* A USE insn, or something else we don't need to
2303 understand. We can't pass these directly to
2304 state_transition because it will trigger a
2305 fatal error for unrecognizable insns. */
2306 cost = 0;
2308 else
2310 cost = state_transition (temp_state, insn);
2311 if (cost < 0)
2312 cost = 0;
2313 else if (cost == 0)
2314 cost = 1;
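/* To illustrate the convention used just above: a negative value
   from state_transition means the insn fit into the current cycle,
   so its cost becomes 0; a nonnegative value asks us to wait, and a
   returned 0 is bumped to 1 so that the insn is requeued for at
   least one cycle. */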
2317 if (cost >= 1)
2319 queue_insn (insn, cost);
2320 if (SCHED_GROUP_P (insn))
2322 advance = cost;
2323 break;
2326 continue;
2329 if (current_sched_info->can_schedule_ready_p
2330 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2331 /* We normally get here only if we don't want to move
2332 the insn from the split block. */
2334 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2335 continue;
2338 /* DECISION is made. */
2340 if (TODO_SPEC (insn) & SPECULATIVE)
2341 generate_recovery_code (insn);
2343 if (control_flow_insn_p (last_scheduled_insn)
2344 /* This is used to switch basic blocks on request
2345 from the scheduler front-end (actually, sched-ebb.c only),
2346 to process blocks with a single fallthru
2347 edge. If the succeeding block has a jump, that jump will try to
2348 move to the end of the current bb, thus corrupting the CFG. */
2349 || current_sched_info->advance_target_bb (*target_bb, insn))
2351 *target_bb = current_sched_info->advance_target_bb
2352 (*target_bb, 0);
2354 if (sched_verbose)
2356 rtx x;
2358 x = next_real_insn (last_scheduled_insn);
2359 gcc_assert (x);
2360 dump_new_block_header (1, *target_bb, x, tail);
2363 last_scheduled_insn = bb_note (*target_bb);
2366 /* Update counters, etc in the scheduler's front end. */
2367 (*current_sched_info->begin_schedule_ready) (insn,
2368 last_scheduled_insn);
2370 move_insn (insn);
2371 last_scheduled_insn = insn;
2373 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2375 cycle_issued_insns++;
2376 memcpy (curr_state, temp_state, dfa_state_size);
2379 if (targetm.sched.variable_issue)
2380 can_issue_more =
2381 targetm.sched.variable_issue (sched_dump, sched_verbose,
2382 insn, can_issue_more);
2383 /* A naked CLOBBER or USE generates no instruction, so do
2384 not count them against the issue rate. */
2385 else if (GET_CODE (PATTERN (insn)) != USE
2386 && GET_CODE (PATTERN (insn)) != CLOBBER)
2387 can_issue_more--;
2389 advance = schedule_insn (insn);
2391 /* After issuing an asm insn we should start a new cycle. */
2392 if (advance == 0 && asm_p)
2393 advance = 1;
2394 if (advance != 0)
2395 break;
2397 first_cycle_insn_p = 0;
2399 /* Sort the ready list based on priority. This must be
2400 redone here, as schedule_insn may have readied additional
2401 insns that will not be sorted correctly. */
2402 if (ready.n_ready > 0)
2403 ready_sort (&ready);
2405 if (targetm.sched.reorder2
2406 && (ready.n_ready == 0
2407 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2409 can_issue_more =
2410 targetm.sched.reorder2 (sched_dump, sched_verbose,
2411 ready.n_ready
2412 ? ready_lastpos (&ready) : NULL,
2413 &ready.n_ready, clock_var);
2418 /* Debug info. */
2419 if (sched_verbose)
2421 fprintf (sched_dump, ";;\tReady list (final): ");
2422 debug_ready_list (&ready);
2425 if (current_sched_info->queue_must_finish_empty)
2426 /* Sanity check -- queue must be empty now. Meaningless if region has
2427 multiple bbs. */
2428 gcc_assert (!q_size && !ready.n_ready);
2429 else
2431 /* We must maintain QUEUE_INDEX between blocks in region. */
2432 for (i = ready.n_ready - 1; i >= 0; i--)
2434 rtx x;
2436 x = ready_element (&ready, i);
2437 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2438 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2441 if (q_size)
2442 for (i = 0; i <= max_insn_queue_index; i++)
2444 rtx link;
2445 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2447 rtx x;
2449 x = XEXP (link, 0);
2450 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2451 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2453 free_INSN_LIST_list (&insn_queue[i]);
2457 if (!current_sched_info->queue_must_finish_empty
2458 || added_recovery_block_p)
2460 /* INSN_TICK (minimum clock tick at which the insn becomes
2461 ready) may be not correct for the insn in the subsequent
2462 blocks of the region. We should use a correct value of
2463 `clock_var' or modify INSN_TICK. It is better to keep
2464 clock_var value equal to 0 at the start of a basic block.
2465 Therefore we modify INSN_TICK here. */
2466 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2469 if (targetm.sched.md_finish)
2470 targetm.sched.md_finish (sched_dump, sched_verbose);
2472 /* Update head/tail boundaries. */
2473 head = NEXT_INSN (prev_head);
2474 tail = last_scheduled_insn;
2476 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2477 previously found among the insns. Insert them at the beginning
2478 of the insns. */
2479 if (note_list != 0)
2481 basic_block head_bb = BLOCK_FOR_INSN (head);
2482 rtx note_head = note_list;
2484 while (PREV_INSN (note_head))
2486 set_block_for_insn (note_head, head_bb);
2487 note_head = PREV_INSN (note_head);
2489 /* In the above loop we've missed this note: */
2490 set_block_for_insn (note_head, head_bb);
2492 PREV_INSN (note_head) = PREV_INSN (head);
2493 NEXT_INSN (PREV_INSN (head)) = note_head;
2494 PREV_INSN (head) = note_list;
2495 NEXT_INSN (note_list) = head;
2496 head = note_head;
2499 /* Debugging. */
2500 if (sched_verbose)
2502 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2503 clock_var, INSN_UID (head));
2504 fprintf (sched_dump, ";; new tail = %d\n\n",
2505 INSN_UID (tail));
2508 current_sched_info->head = head;
2509 current_sched_info->tail = tail;
2511 free (ready.vec);
2513 free (ready_try);
2514 for (i = 0; i <= rgn_n_insns; i++)
2515 free (choice_stack [i].state);
2516 free (choice_stack);
2519 /* Set_priorities: compute the priority of each insn in the block. */
2521 int
2522 set_priorities (rtx head, rtx tail)
2524 rtx insn;
2525 int n_insn;
2526 int sched_max_insns_priority =
2527 current_sched_info->sched_max_insns_priority;
2528 rtx prev_head;
2530 if (head == tail && (! INSN_P (head)))
2531 return 0;
2533 n_insn = 0;
2535 prev_head = PREV_INSN (head);
2536 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2538 if (!INSN_P (insn))
2539 continue;
2541 n_insn++;
2542 (void) priority (insn);
2544 if (INSN_PRIORITY_KNOWN (insn))
2545 sched_max_insns_priority =
2546 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2549 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2551 return n_insn;
2554 /* Next LUID to assign to an instruction. */
2555 static int luid;
2557 /* Initialize some global state for the scheduler. */
2559 void
2560 sched_init (void)
2562 basic_block b;
2563 rtx insn;
2564 int i;
2566 /* Switch to working copy of sched_info. */
2567 memcpy (&current_sched_info_var, current_sched_info,
2568 sizeof (current_sched_info_var));
2569 current_sched_info = &current_sched_info_var;
2571 /* If cc0 is defined, disable speculative loads. */
2572 #ifdef HAVE_cc0
2573 flag_schedule_speculative_load = 0;
2574 #endif
2576 /* Set dump and sched_verbose for the desired debugging output. If no
2577 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2578 For -fsched-verbose=N, N>=10, print everything to stderr. */
2579 sched_verbose = sched_verbose_param;
2580 if (sched_verbose_param == 0 && dump_file)
2581 sched_verbose = 1;
2582 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2583 ? stderr : dump_file);
2585 /* Initialize SPEC_INFO. */
2586 if (targetm.sched.set_sched_flags)
2588 spec_info = &spec_info_var;
2589 targetm.sched.set_sched_flags (spec_info);
2590 if (current_sched_info->flags & DO_SPECULATION)
2591 spec_info->weakness_cutoff =
2592 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2593 else
2594 /* So we won't read anything accidentally. */
2595 spec_info = 0;
2596 #ifdef ENABLE_CHECKING
2597 check_sched_flags ();
2598 #endif
2600 else
2601 /* So we won't read anything accidentally. */
2602 spec_info = 0;
2604 /* Initialize issue_rate. */
2605 if (targetm.sched.issue_rate)
2606 issue_rate = targetm.sched.issue_rate ();
2607 else
2608 issue_rate = 1;
2610 if (cached_issue_rate != issue_rate)
2612 cached_issue_rate = issue_rate;
2613 /* To invalidate max_lookahead_tries: */
2614 cached_first_cycle_multipass_dfa_lookahead = 0;
2617 old_max_uid = 0;
2618 h_i_d = 0;
2619 extend_h_i_d ();
2621 for (i = 0; i < old_max_uid; i++)
2623 h_i_d[i].cost = -1;
2624 h_i_d[i].todo_spec = HARD_DEP;
2625 h_i_d[i].queue_index = QUEUE_NOWHERE;
2626 h_i_d[i].tick = INVALID_TICK;
2627 h_i_d[i].inter_tick = INVALID_TICK;
2630 if (targetm.sched.init_dfa_pre_cycle_insn)
2631 targetm.sched.init_dfa_pre_cycle_insn ();
2633 if (targetm.sched.init_dfa_post_cycle_insn)
2634 targetm.sched.init_dfa_post_cycle_insn ();
2636 dfa_start ();
2637 dfa_state_size = state_size ();
2638 curr_state = xmalloc (dfa_state_size);
2640 h_i_d[0].luid = 0;
2641 luid = 1;
2642 FOR_EACH_BB (b)
2643 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2645 INSN_LUID (insn) = luid;
2647 /* Increment the next luid, unless this is a note. We don't
2648 really need separate IDs for notes and we don't want to
2649 schedule differently depending on whether or not there are
2650 line-number notes, i.e., depending on whether or not we're
2651 generating debugging information. */
2652 if (!NOTE_P (insn))
2653 ++luid;
2655 if (insn == BB_END (b))
2656 break;
2659 init_dependency_caches (luid);
2661 init_alias_analysis ();
2663 old_last_basic_block = 0;
2664 glat_start = 0;
2665 glat_end = 0;
2666 extend_bb ();
2668 if (current_sched_info->flags & USE_GLAT)
2669 init_glat ();
2671 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2672 removing death notes. */
2673 FOR_EACH_BB_REVERSE (b)
2674 find_insn_reg_weight (b);
2676 if (targetm.sched.md_init_global)
2677 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2679 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2680 before_recovery = 0;
2682 #ifdef ENABLE_CHECKING
2683 /* This is used mostly for finding bugs in check_cfg () itself. */
2684 check_cfg (0, 0);
2685 #endif
2688 /* Free global data used during insn scheduling. */
2690 void
2691 sched_finish (void)
2693 free (h_i_d);
2694 free (curr_state);
2695 dfa_finish ();
2696 free_dependency_caches ();
2697 end_alias_analysis ();
2698 free_glat ();
2700 if (targetm.sched.md_finish_global)
2701 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2703 if (spec_info && spec_info->dump)
2705 char c = reload_completed ? 'a' : 'b';
2707 fprintf (spec_info->dump,
2708 ";; %s:\n", current_function_name ());
2710 fprintf (spec_info->dump,
2711 ";; Procedure %cr-begin-data-spec motions == %d\n",
2712 c, nr_begin_data);
2713 fprintf (spec_info->dump,
2714 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2715 c, nr_be_in_data);
2716 fprintf (spec_info->dump,
2717 ";; Procedure %cr-begin-control-spec motions == %d\n",
2718 c, nr_begin_control);
2719 fprintf (spec_info->dump,
2720 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2721 c, nr_be_in_control);
2724 #ifdef ENABLE_CHECKING
2725 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
2726 if (!reload_completed)
2727 check_cfg (0, 0);
2728 #endif
2730 current_sched_info = NULL;
2733 /* Fix INSN_TICKs of the instructions in the current block as well as
2734 INSN_TICKs of their dependents.
2735 HEAD and TAIL are the beginning and the end of the currently scheduled block. */
2736 static void
2737 fix_inter_tick (rtx head, rtx tail)
2739 /* Set of instructions with corrected INSN_TICK. */
2740 bitmap_head processed;
2741 int next_clock = clock_var + 1;
2743 bitmap_initialize (&processed, 0);
2745 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2746 the INSN_TICKs of their dependent instructions, so that INSN_TICKs are
2747 consistent across different blocks. */
2748 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2750 if (INSN_P (head))
2752 int tick;
2753 dep_link_t link;
2755 tick = INSN_TICK (head);
2756 gcc_assert (tick >= MIN_TICK);
2758 /* Fix the INSN_TICK of an instruction from the just-scheduled block. */
2759 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2761 bitmap_set_bit (&processed, INSN_LUID (head));
2762 tick -= next_clock;
2764 if (tick < MIN_TICK)
2765 tick = MIN_TICK;
2767 INSN_TICK (head) = tick;
2770 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (head))
2772 rtx next;
2774 next = DEP_LINK_CON (link);
2775 tick = INSN_TICK (next);
2777 if (tick != INVALID_TICK
2778 /* If NEXT has its INSN_TICK calculated, fix it.
2779 If not, it will be properly calculated from
2780 scratch later in fix_tick_ready. */
2781 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2783 bitmap_set_bit (&processed, INSN_LUID (next));
2784 tick -= next_clock;
2786 if (tick < MIN_TICK)
2787 tick = MIN_TICK;
2789 if (tick > INTER_TICK (next))
2790 INTER_TICK (next) = tick;
2791 else
2792 tick = INTER_TICK (next);
2794 INSN_TICK (next) = tick;
2799 bitmap_clear (&processed);
2802 /* Check if NEXT is ready to be added to the ready or queue list.
2803 If "yes", add it to the proper list.
2804 Returns:
2805 -1 - is not ready yet,
2806 0 - added to the ready list,
2807 0 < N - queued for N cycles. */
2808 int
2809 try_ready (rtx next)
2811 ds_t old_ts, *ts;
2812 dep_link_t link;
2814 ts = &TODO_SPEC (next);
2815 old_ts = *ts;
2817 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
2818 && ((old_ts & HARD_DEP)
2819 || (old_ts & SPECULATIVE)));
2821 if (!(current_sched_info->flags & DO_SPECULATION))
2823 if (deps_list_empty_p (INSN_BACK_DEPS (next)))
2824 *ts &= ~HARD_DEP;
2826 else
2828 *ts &= ~SPECULATIVE & ~HARD_DEP;
2830 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (next));
2832 if (link != NULL)
2834 ds_t ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2836 /* Backward dependencies of the insn are maintained sorted.
2837 So if DEP_STATUS of the first dep is SPECULATIVE,
2838 then all other deps are speculative too. */
2839 if (ds != 0)
2841 /* Now we've got NEXT with speculative deps only.
2842 1. Look at the deps to see what we have to do.
2843 2. Check if we can do 'todo'. */
2844 *ts = ds;
2846 while ((link = DEP_LINK_NEXT (link)) != NULL)
2848 ds = DEP_LINK_STATUS (link) & SPECULATIVE;
2849 *ts = ds_merge (*ts, ds);
2852 if (dep_weak (*ts) < spec_info->weakness_cutoff)
2853 /* Too few points. */
2854 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2856 else
2857 *ts |= HARD_DEP;
2861 if (*ts & HARD_DEP)
2862 gcc_assert (*ts == old_ts
2863 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
2864 else if (current_sched_info->new_ready)
2865 *ts = current_sched_info->new_ready (next, *ts);
2867 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
2868 have its original pattern or a changed (speculative) one. This is due
2869 to changing the ebb in region scheduling.
2870 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
2871 has a speculative pattern.
2873 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
2874 control-speculative NEXT could have been discarded by sched-rgn.c
2875 (the same case as when discarded by can_schedule_ready_p ()). */
2877 if ((*ts & SPECULATIVE)
2878 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
2879 need to change anything. */
2880 && *ts != old_ts)
2882 int res;
2883 rtx new_pat;
2885 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
2887 res = speculate_insn (next, *ts, &new_pat);
2889 switch (res)
2891 case -1:
2892 /* It would be nice to change DEP_STATUS of all dependences,
2893 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
2894 so we won't reanalyze anything. */
2895 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
2896 break;
2898 case 0:
2899 /* We follow the rule, that every speculative insn
2900 has non-null ORIG_PAT. */
2901 if (!ORIG_PAT (next))
2902 ORIG_PAT (next) = PATTERN (next);
2903 break;
2905 case 1:
2906 if (!ORIG_PAT (next))
2907 /* If we are going to overwrite the original pattern of the insn,
2908 save it. */
2909 ORIG_PAT (next) = PATTERN (next);
2911 change_pattern (next, new_pat);
2912 break;
2914 default:
2915 gcc_unreachable ();
2919 /* We need to restore the pattern only if (*ts == 0), because otherwise it is
2920 either correct (*ts & SPECULATIVE),
2921 or we simply don't care (*ts & HARD_DEP). */
2923 gcc_assert (!ORIG_PAT (next)
2924 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
2926 if (*ts & HARD_DEP)
2928 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
2929 control-speculative NEXT could have been discarded by sched-rgn.c
2930 (the same case as when discarded by can_schedule_ready_p ()). */
2931 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
2933 change_queue_index (next, QUEUE_NOWHERE);
2934 return -1;
2936 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
2937 /* We should change the pattern of every previously speculative
2938 instruction - and we determine whether NEXT was speculative by using
2939 the ORIG_PAT field. There is one exception: speculation checks have
2940 ORIG_PAT set too, so skip them. */
2942 change_pattern (next, ORIG_PAT (next));
2943 ORIG_PAT (next) = 0;
2946 if (sched_verbose >= 2)
2948 int s = TODO_SPEC (next);
2950 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
2951 (*current_sched_info->print_insn) (next, 0));
2953 if (spec_info && spec_info->dump)
2955 if (s & BEGIN_DATA)
2956 fprintf (spec_info->dump, "; data-spec;");
2957 if (s & BEGIN_CONTROL)
2958 fprintf (spec_info->dump, "; control-spec;");
2959 if (s & BE_IN_CONTROL)
2960 fprintf (spec_info->dump, "; in-control-spec;");
2963 fprintf (sched_dump, "\n");
2966 adjust_priority (next);
2968 return fix_tick_ready (next);
2971 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
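/* A worked example with hypothetical numbers: if NEXT has two
   resolved producers with INSN_TICK 3 and 5 and dep_cost 2 and 1
   respectively, the loop below yields
   INSN_TICK (NEXT) = MAX (3 + 2, 5 + 1) = 6; with clock_var == 4 the
   insn is then queued for a delay of 6 - 4 = 2 cycles. */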
2972 static int
2973 fix_tick_ready (rtx next)
2975 int tick, delay;
2977 if (!deps_list_empty_p (INSN_RESOLVED_BACK_DEPS (next)))
2979 int full_p;
2980 dep_link_t link;
2982 tick = INSN_TICK (next);
2983 /* if tick is not equal to INVALID_TICK, then update
2984 INSN_TICK of NEXT with the most recent resolved dependence
2985 cost. Otherwise, recalculate from scratch. */
2986 full_p = (tick == INVALID_TICK);
2988 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (next))
2990 dep_t dep = DEP_LINK_DEP (link);
2991 rtx pro = DEP_PRO (dep);
2992 int tick1;
2994 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
2996 tick1 = INSN_TICK (pro) + dep_cost (dep);
2997 if (tick1 > tick)
2998 tick = tick1;
3000 if (!full_p)
3001 break;
3004 else
3005 tick = -1;
3007 INSN_TICK (next) = tick;
3009 delay = tick - clock_var;
3010 if (delay <= 0)
3011 delay = QUEUE_READY;
3013 change_queue_index (next, delay);
3015 return delay;
3018 /* Move NEXT to the proper queue list with (DELAY >= 1),
3019 or add it to the ready list (DELAY == QUEUE_READY),
3020 or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE). */
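/* Usage sketch, with calls as they appear elsewhere in this file:
   change_queue_index (insn, 1) requeues INSN for the next cycle,
   change_queue_index (next, QUEUE_READY) moves NEXT to the ready
   list, and change_queue_index (next, QUEUE_NOWHERE) removes NEXT
   from both lists. */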
3021 static void
3022 change_queue_index (rtx next, int delay)
3024 int i = QUEUE_INDEX (next);
3026 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3027 && delay != 0);
3028 gcc_assert (i != QUEUE_SCHEDULED);
3030 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3031 || (delay < 0 && delay == i))
3032 /* We have nothing to do. */
3033 return;
3035 /* Remove NEXT from wherever it is now. */
3036 if (i == QUEUE_READY)
3037 ready_remove_insn (next);
3038 else if (i >= 0)
3039 queue_remove (next);
3041 /* Add it to the proper place. */
3042 if (delay == QUEUE_READY)
3043 ready_add (readyp, next, false);
3044 else if (delay >= 1)
3045 queue_insn (next, delay);
3047 if (sched_verbose >= 2)
3049 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3050 (*current_sched_info->print_insn) (next, 0));
3052 if (delay == QUEUE_READY)
3053 fprintf (sched_dump, " into ready\n");
3054 else if (delay >= 1)
3055 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3056 else
3057 fprintf (sched_dump, " removed from ready or queue lists\n");
3061 /* Extend H_I_D data. */
3062 static void
3063 extend_h_i_d (void)
3065 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3066 pseudos which do not cross calls. */
3067 int new_max_uid = get_max_uid () + 1;
3069 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3070 old_max_uid = new_max_uid;
3072 if (targetm.sched.h_i_d_extended)
3073 targetm.sched.h_i_d_extended ();
3076 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3077 N_NEW_INSNS is the number of additional elements to allocate. */
3078 static void
3079 extend_ready (int n_new_insns)
3081 int i;
3083 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3084 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3086 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3087 rgn_n_insns + 1, sizeof (char));
3089 rgn_n_insns += n_new_insns;
3091 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3092 rgn_n_insns + 1);
3094 for (i = rgn_n_insns; n_new_insns--; i--)
3095 choice_stack[i].state = xmalloc (dfa_state_size);
3098 /* Extend global scheduler structures (those that live across calls to
3099 schedule_block) to include information about the just emitted INSN. */
3100 static void
3101 extend_global (rtx insn)
3103 gcc_assert (INSN_P (insn));
3104 /* These structures have scheduler scope. */
3105 extend_h_i_d ();
3106 init_h_i_d (insn);
3108 extend_dependency_caches (1, 0);
3111 /* Extend global and local scheduler structures to include information
3112 about the just emitted INSN. */
3113 static void
3114 extend_all (rtx insn)
3116 extend_global (insn);
3118 /* These structures have block scope. */
3119 extend_ready (1);
3121 (*current_sched_info->add_remove_insn) (insn, 0);
3124 /* Initialize the h_i_d entry of the new INSN with default values.
3125 Values that are not explicitly initialized here hold zero. */
3126 static void
3127 init_h_i_d (rtx insn)
3129 INSN_LUID (insn) = luid++;
3130 INSN_COST (insn) = -1;
3131 TODO_SPEC (insn) = HARD_DEP;
3132 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3133 INSN_TICK (insn) = INVALID_TICK;
3134 INTER_TICK (insn) = INVALID_TICK;
3135 find_insn_reg_weight1 (insn);
3137 /* These two lists will be freed in schedule_insn (). */
3138 INSN_BACK_DEPS (insn) = create_deps_list (false);
3139 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);
3141 /* This one should be allocated on the obstack because it should live till
3142 the scheduling ends. */
3143 INSN_FORW_DEPS (insn) = create_deps_list (true);
3146 /* Generates recovery code for INSN. */
3147 static void
3148 generate_recovery_code (rtx insn)
3150 if (TODO_SPEC (insn) & BEGIN_SPEC)
3151 begin_speculative_block (insn);
3153 /* Here we have an insn with no dependencies on
3154 instructions other than CHECK_SPEC ones. */
3156 if (TODO_SPEC (insn) & BE_IN_SPEC)
3157 add_to_speculative_block (insn);
3160 /* Helper function.
3161 Try to add speculative dependencies of type FS between instructions
3162 in deps_list L and TWIN. */
3163 static void
3164 process_insn_forw_deps_be_in_spec (deps_list_t l, rtx twin, ds_t fs)
3166 dep_link_t link;
3168 FOR_EACH_DEP_LINK (link, l)
3170 ds_t ds;
3171 rtx consumer;
3173 consumer = DEP_LINK_CON (link);
3175 ds = DEP_LINK_STATUS (link);
3177 if (/* If we want to create a speculative dep ... */
3178 fs
3179 /* ... and we can do that because this is a true dep. */
3180 && (ds & DEP_TYPES) == DEP_TRUE)
3182 gcc_assert (!(ds & BE_IN_SPEC));
3184 if (/* If this dep can be overcome with 'begin speculation'. */
3185 ds & BEGIN_SPEC)
3186 /* Then we have a choice: keep the dep 'begin speculative'
3187 or transform it into 'be in speculative'. */
3189 if (/* In try_ready we assert that once an insn became ready
3190 it can be removed from the ready (or queue) list only
3191 due to a backend decision. Hence we can't let the
3192 probability of the speculative dep decrease. */
3193 dep_weak (ds) <= dep_weak (fs))
3194 /* Transform it to be in speculative. */
3195 ds = (ds & ~BEGIN_SPEC) | fs;
3197 else
3198 /* Mark the dep as 'be in speculative'. */
3199 ds |= fs;
3202 add_back_forw_dep (consumer, twin, DEP_LINK_KIND (link), ds);
3206 /* Generates recovery code for BEGIN speculative INSN. */
3207 static void
3208 begin_speculative_block (rtx insn)
3210 if (TODO_SPEC (insn) & BEGIN_DATA)
3211 nr_begin_data++;
3212 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3213 nr_begin_control++;
3215 create_check_block_twin (insn, false);
3217 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3220 /* Generates recovery code for BE_IN speculative INSN. */
3221 static void
3222 add_to_speculative_block (rtx insn)
3224 ds_t ts;
3225 dep_link_t link;
3226 rtx twins = NULL;
3228 ts = TODO_SPEC (insn);
3229 gcc_assert (!(ts & ~BE_IN_SPEC));
3231 if (ts & BE_IN_DATA)
3232 nr_be_in_data++;
3233 if (ts & BE_IN_CONTROL)
3234 nr_be_in_control++;
3236 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3237 gcc_assert (!TODO_SPEC (insn));
3239 DONE_SPEC (insn) |= ts;
3241 /* First we convert all simple checks to branchy ones. */
3242 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3244 rtx check = DEP_LINK_PRO (link);
3246 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3248 create_check_block_twin (check, true);
3250 /* Restart search. */
3251 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3253 else
3254 /* Continue search. */
3255 link = DEP_LINK_NEXT (link);
3258 clear_priorities (insn);
3262 dep_link_t link;
3263 rtx check, twin;
3264 basic_block rec;
3266 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3268 gcc_assert ((DEP_LINK_STATUS (link) & BEGIN_SPEC) == 0
3269 && (DEP_LINK_STATUS (link) & BE_IN_SPEC) != 0
3270 && (DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3272 check = DEP_LINK_PRO (link);
3274 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3275 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3277 rec = BLOCK_FOR_INSN (check);
3279 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3280 extend_global (twin);
3282 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3283 INSN_RESOLVED_BACK_DEPS (insn),
3284 twin);
3286 if (sched_verbose && spec_info->dump)
3287 /* INSN_BB (insn) isn't determined for twin insns yet.
3288 So we can't use current_sched_info->print_insn. */
3289 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3290 INSN_UID (twin), rec->index);
3292 twins = alloc_INSN_LIST (twin, twins);
3294 /* Add dependences between TWIN and all appropriate
3295 instructions from REC. */
3298 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3302 link = DEP_LINK_NEXT (link);
3304 if (link != NULL)
3306 check = DEP_LINK_PRO (link);
3307 if (BLOCK_FOR_INSN (check) == rec)
3308 break;
3310 else
3311 break;
3313 while (1);
3315 while (link != NULL);
3317 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, ts);
3319 /* Remove all dependencies between INSN and insns in REC. */
3320 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3322 check = DEP_LINK_PRO (link);
3324 if (BLOCK_FOR_INSN (check) == rec)
3326 delete_back_forw_dep (link);
3328 /* Restart search. */
3329 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3331 else
3332 /* Continue search. */
3333 link = DEP_LINK_NEXT (link);
3336 while (!deps_list_empty_p (INSN_BACK_DEPS (insn)));
3338 /* We couldn't have added the dependencies between INSN and TWINS earlier
3339 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
3340 while (twins)
3342 rtx twin;
3344 twin = XEXP (twins, 0);
3345 calc_priorities (twin);
3346 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3348 twin = XEXP (twins, 1);
3349 free_INSN_LIST_node (twins);
3350 twins = twin;
3354 /* Extend the array pointed to by P, filling only the new part with zeros. */
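/* Usage sketch, taken from extend_ready above:

     ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
                            rgn_n_insns + 1, sizeof (char));

   which grows READY_TRY and zeroes only the newly added bytes. */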
3355 void *
3356 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3358 gcc_assert (new_nmemb >= old_nmemb);
3359 p = XRESIZEVAR (void, p, new_nmemb * size);
3360 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3361 return p;
3364 /* Return the probability of speculation success for the speculation
3365 status DS. */
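/* For instance (illustrative weights): if DS carries two speculation
   types with weaknesses w1 and w2 (each at most MAX_DEP_WEAK), the
   loop below computes roughly w1 * w2 / MAX_DEP_WEAK, i.e. success
   probabilities multiply.  The result is what try_ready compares
   against spec_info->weakness_cutoff. */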
3366 static dw_t
3367 dep_weak (ds_t ds)
3369 ds_t res = 1, dt;
3370 int n = 0;
3372 dt = FIRST_SPEC_TYPE;
3375 if (ds & dt)
3377 res *= (ds_t) get_dep_weak (ds, dt);
3378 n++;
3381 if (dt == LAST_SPEC_TYPE)
3382 break;
3383 dt <<= SPEC_TYPE_SHIFT;
3385 while (1);
3387 gcc_assert (n);
3388 while (--n)
3389 res /= MAX_DEP_WEAK;
3391 if (res < MIN_DEP_WEAK)
3392 res = MIN_DEP_WEAK;
3394 gcc_assert (res <= MAX_DEP_WEAK);
3396 return (dw_t) res;
3399 /* Helper function.
3400 Find fallthru edge from PRED. */
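/* Note on the implementation below: the fallthru edge, if it exists,
   is present in both PRED->succs and SUCC->preds, so we scan
   whichever of the two lists is shorter. */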
3401 static edge
3402 find_fallthru_edge (basic_block pred)
3404 edge e;
3405 edge_iterator ei;
3406 basic_block succ;
3408 succ = pred->next_bb;
3409 gcc_assert (succ->prev_bb == pred);
3411 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3413 FOR_EACH_EDGE (e, ei, pred->succs)
3414 if (e->flags & EDGE_FALLTHRU)
3416 gcc_assert (e->dest == succ);
3417 return e;
3420 else
3422 FOR_EACH_EDGE (e, ei, succ->preds)
3423 if (e->flags & EDGE_FALLTHRU)
3425 gcc_assert (e->src == pred);
3426 return e;
3430 return NULL;
3433 /* Initialize BEFORE_RECOVERY variable. */
3434 static void
3435 init_before_recovery (void)
3437 basic_block last;
3438 edge e;
3440 last = EXIT_BLOCK_PTR->prev_bb;
3441 e = find_fallthru_edge (last);
3443 if (e)
3445 /* We create two basic blocks:
3446 1. A single-instruction block that is inserted right after E->SRC
3447 and jumps to
3448 2. An empty block placed right before EXIT_BLOCK.
3449 Recovery blocks will be emitted between these two blocks. */
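/* Schematically, the single fallthru edge E : LAST -> EXIT becomes
   LAST ->> SINGLE -> EMPTY ->> EXIT, which is the picture the
   verbose dump below prints as "%d->>%d->%d->>EXIT". */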
3451 basic_block single, empty;
3452 rtx x, label;
3454 single = create_empty_bb (last);
3455 empty = create_empty_bb (single);
3457 single->count = last->count;
3458 empty->count = last->count;
3459 single->frequency = last->frequency;
3460 empty->frequency = last->frequency;
3461 BB_COPY_PARTITION (single, last);
3462 BB_COPY_PARTITION (empty, last);
3464 redirect_edge_succ (e, single);
3465 make_single_succ_edge (single, empty, 0);
3466 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3467 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3469 label = block_label (empty);
3470 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3471 JUMP_LABEL (x) = label;
3472 LABEL_NUSES (label)++;
3473 extend_global (x);
3475 emit_barrier_after (x);
3477 add_block (empty, 0);
3478 add_block (single, 0);
3480 before_recovery = single;
3482 if (sched_verbose >= 2 && spec_info->dump)
3483 fprintf (spec_info->dump,
3484 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3485 last->index, single->index, empty->index);
3487 else
3488 before_recovery = last;
3491 /* Returns new recovery block. */
3492 static basic_block
3493 create_recovery_block (void)
3495 rtx label;
3496 rtx barrier;
3497 basic_block rec;
3499 added_recovery_block_p = true;
3501 if (!before_recovery)
3502 init_before_recovery ();
3504 barrier = get_last_bb_insn (before_recovery);
3505 gcc_assert (BARRIER_P (barrier));
3507 label = emit_label_after (gen_label_rtx (), barrier);
3509 rec = create_basic_block (label, label, before_recovery);
3511 /* A recovery block always ends with an unconditional jump. */
3512 emit_barrier_after (BB_END (rec));
3514 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3515 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3517 if (sched_verbose && spec_info->dump)
3518 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3519 rec->index);
3521 before_recovery = rec;
3523 return rec;
3526 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3527 INSN is a simple check that should be converted to a branchy one. */
3528 static void
3529 create_check_block_twin (rtx insn, bool mutate_p)
3531 basic_block rec;
3532 rtx label, check, twin;
3533 dep_link_t link;
3534 ds_t fs;
3536 gcc_assert (ORIG_PAT (insn)
3537 && (!mutate_p
3538 || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3539 && !(TODO_SPEC (insn) & SPECULATIVE))));
3541 /* Create recovery block. */
3542 if (mutate_p || targetm.sched.needs_block_p (insn))
3544 rec = create_recovery_block ();
3545 label = BB_HEAD (rec);
3547 else
3549 rec = EXIT_BLOCK_PTR;
3550 label = 0;
3553 /* Emit CHECK. */
3554 check = targetm.sched.gen_check (insn, label, mutate_p);
3556 if (rec != EXIT_BLOCK_PTR)
3558 /* To have mem_reg alive at the beginning of second_bb,
3559 we emit the check BEFORE insn, so that after splitting,
3560 insn will be at the beginning of second_bb, which will
3561 provide us with the correct life information. */
3562 check = emit_jump_insn_before (check, insn);
3563 JUMP_LABEL (check) = label;
3564 LABEL_NUSES (label)++;
3566 else
3567 check = emit_insn_before (check, insn);
3569 /* Extend data structures. */
3570 extend_all (check);
3571 RECOVERY_BLOCK (check) = rec;
3573 if (sched_verbose && spec_info->dump)
3574 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3575 (*current_sched_info->print_insn) (check, 0));
3577 gcc_assert (ORIG_PAT (insn));
3579 /* Initialize TWIN (twin is a duplicate of the original instruction
3580 in the recovery block). */
3581 if (rec != EXIT_BLOCK_PTR)
3583 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (insn))
3584 if ((DEP_LINK_STATUS (link) & DEP_OUTPUT) != 0)
3586 struct _dep _dep, *dep = &_dep;
3588 init_dep (dep, DEP_LINK_PRO (link), check, REG_DEP_TRUE);
3590 add_back_dep_to_deps_list (INSN_RESOLVED_BACK_DEPS (check), dep);
3593 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3594 extend_global (twin);
3596 if (sched_verbose && spec_info->dump)
3597 /* INSN_BB (insn) isn't determined for twin insns yet.
3598 So we can't use current_sched_info->print_insn. */
3599 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3600 INSN_UID (twin), rec->index);
3602 else
3604 ORIG_PAT (check) = ORIG_PAT (insn);
3605 HAS_INTERNAL_DEP (check) = 1;
3606 twin = check;
3607 /* ??? We probably should change all OUTPUT dependencies to
3608 (TRUE | OUTPUT). */
3611 copy_deps_list_change_con (INSN_RESOLVED_BACK_DEPS (twin),
3612 INSN_RESOLVED_BACK_DEPS (insn),
3613 twin);
3615 if (rec != EXIT_BLOCK_PTR)
3616 /* In case of branchy check, fix CFG. */
3618 basic_block first_bb, second_bb;
3619 rtx jump;
3620 edge e;
3621 int edge_flags;
3623 first_bb = BLOCK_FOR_INSN (check);
3624 e = split_block (first_bb, check);
3625 /* split_block emits a note if *check == BB_END. It is
3626 probably better to remove that note. */
3627 gcc_assert (e->src == first_bb);
3628 second_bb = e->dest;
3630 /* This fixes the incoming edge. */
3631 /* ??? Which other flags should be specified? */
3632 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3633 /* Partition types are the same if both are "unpartitioned". */
3634 edge_flags = EDGE_CROSSING;
3635 else
3636 edge_flags = 0;
3638 e = make_edge (first_bb, rec, edge_flags);
3640 add_block (second_bb, first_bb);
3642 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3643 label = block_label (second_bb);
3644 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3645 JUMP_LABEL (jump) = label;
3646 LABEL_NUSES (label)++;
3647 extend_global (jump);
3649 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3650 /* Partition types are the same if both are "unpartitioned". */
3652 /* Rewritten from cfgrtl.c. */
3653 if (flag_reorder_blocks_and_partition
3654 && targetm.have_named_sections
3655 /*&& !any_condjump_p (jump)*/)
3656 /* any_condjump_p (jump) == false.
3657 We don't need the same note for the check because
3658 any_condjump_p (check) == true. */
3660 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3661 NULL_RTX,
3662 REG_NOTES (jump));
3664 edge_flags = EDGE_CROSSING;
3666 else
3667 edge_flags = 0;
3669 make_single_succ_edge (rec, second_bb, edge_flags);
3671 add_block (rec, EXIT_BLOCK_PTR);
3674 /* Move backward dependences from INSN to CHECK and
3675 move forward dependences from INSN to TWIN. */
3676 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
3678 rtx pro = DEP_LINK_PRO (link);
3679 enum reg_note dk = DEP_LINK_KIND (link);
3680 ds_t ds;
3682 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3683 check --TRUE--> producer ??? or ANTI ???
3684 twin --TRUE--> producer
3685 twin --ANTI--> check
3687 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3688 check --ANTI--> producer
3689 twin --ANTI--> producer
3690 twin --ANTI--> check
3692 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3693 check ~~TRUE~~> producer
3694 twin ~~TRUE~~> producer
3695 twin --ANTI--> check */
3697 ds = DEP_LINK_STATUS (link);
3699 if (ds & BEGIN_SPEC)
3701 gcc_assert (!mutate_p);
3702 ds &= ~BEGIN_SPEC;
3705 if (rec != EXIT_BLOCK_PTR)
3707 add_back_forw_dep (check, pro, dk, ds);
3708 add_back_forw_dep (twin, pro, dk, ds);
3710 else
3711 add_back_forw_dep (check, pro, dk, ds);
3714 for (link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)); link != NULL;)
3715 if ((DEP_LINK_STATUS (link) & BEGIN_SPEC)
3716 || mutate_p)
3717 /* We can delete this dep only if we totally overcome it with
3718 BEGIN_SPECULATION. */
3720 delete_back_forw_dep (link);
3722 /* Restart search. */
3723 link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
3725 else
3726 /* Continue search. */
3727 link = DEP_LINK_NEXT (link);
3729 fs = 0;
3731 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3732 here. */
3734 gcc_assert (!DONE_SPEC (insn));
3736 if (!mutate_p)
3738 ds_t ts = TODO_SPEC (insn);
3740 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3741 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3743 if (ts & BEGIN_DATA)
3744 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3745 if (ts & BEGIN_CONTROL)
3746 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3748 else
3749 CHECK_SPEC (check) = CHECK_SPEC (insn);
3751 /* Future speculations: call the helper. */
3752 process_insn_forw_deps_be_in_spec (INSN_FORW_DEPS (insn), twin, fs);
3754 if (rec != EXIT_BLOCK_PTR)
3756 /* Which types of dependencies we should use here is,
3757 generally, a machine-dependent question... But, for now,
3758 it is not. */
3760 if (!mutate_p)
3762 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3763 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3765 else
3767 dep_link_t link;
3769 if (spec_info->dump)
3770 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3771 (*current_sched_info->print_insn) (insn, 0));
3773 /* Remove all forward dependencies of the INSN. */
3774 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3775 while (link != NULL)
3777 delete_back_forw_dep (link);
3778 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3781 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3782 try_ready (check);
3784 sched_remove_insn (insn);
3787 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3789 else
3790 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3792 if (!mutate_p)
3793 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3794 because it'll be done later in add_to_speculative_block. */
3796 clear_priorities (twin);
3797 calc_priorities (twin);
3801 /* Remove dependences between instructions in the recovery block REC
3802 and ordinary region instructions. Inner dependences are kept so
3803 that it won't be necessary to recompute them. */
3804 static void
3805 fix_recovery_deps (basic_block rec)
3807 dep_link_t link;
3808 rtx note, insn, jump, ready_list = 0;
3809 bitmap_head in_ready;
3810 rtx link1;
3812 bitmap_initialize (&in_ready, 0);
3814 /* NOTE - a basic block note. */
3815 note = NEXT_INSN (BB_HEAD (rec));
3816 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3817 insn = BB_END (rec);
3818 gcc_assert (JUMP_P (insn));
3819 insn = PREV_INSN (insn);
3823 for (link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn)); link != NULL;)
3825 rtx consumer;
3827 consumer = DEP_LINK_CON (link);
3829 if (BLOCK_FOR_INSN (consumer) != rec)
3831 delete_back_forw_dep (link);
3833 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3835 ready_list = alloc_INSN_LIST (consumer, ready_list);
3836 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3839 /* Restart search. */
3840 link = DEPS_LIST_FIRST (INSN_FORW_DEPS (insn));
3842 else
3844 gcc_assert ((DEP_LINK_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3846 /* Continue search. */
3847 link = DEP_LINK_NEXT (link);
3851 insn = PREV_INSN (insn);
3853 while (insn != note);
3855 bitmap_clear (&in_ready);
3857 /* Try to add instructions to the ready or queue list. */
3858 for (link1 = ready_list; link1; link1 = XEXP (link1, 1))
3859 try_ready (XEXP (link1, 0));
3860 free_INSN_LIST_list (&ready_list);
3862 /* Fix the jump's dependences. */
3863 insn = BB_HEAD (rec);
3864 jump = BB_END (rec);
3866 gcc_assert (LABEL_P (insn));
3867 insn = NEXT_INSN (insn);
3869 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
3870 add_jump_dependencies (insn, jump);
3873 /* Change the pattern of INSN to NEW_PAT. */
3874 static void
3875 change_pattern (rtx insn, rtx new_pat)
3877 int t;
3879 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
3880 gcc_assert (t);
3881 /* Invalidate INSN_COST, so it'll be recalculated. */
3882 INSN_COST (insn) = -1;
3883 /* Invalidate INSN_TICK, so it'll be recalculated. */
3884 INSN_TICK (insn) = INVALID_TICK;
3885 dfa_clear_single_insn_cache (insn);
3889 /* -1 - can't speculate,
3890 0 - for speculation with REQUEST mode it is OK to use
3891 current instruction pattern,
3892 1 - the pattern must be changed to *NEW_PAT for the insn to be speculative. */
3893 static int
3894 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
3896 gcc_assert (current_sched_info->flags & DO_SPECULATION
3897 && (request & SPECULATIVE));
3899 if (!NONJUMP_INSN_P (insn)
3900 || HAS_INTERNAL_DEP (insn)
3901 || SCHED_GROUP_P (insn)
3902 || side_effects_p (PATTERN (insn))
3903 || (request & spec_info->mask) != request)
3904 return -1;
3906 gcc_assert (!IS_SPECULATION_CHECK_P (insn));
3908 if (request & BE_IN_SPEC)
3910 if (may_trap_p (PATTERN (insn)))
3911 return -1;
3913 if (!(request & BEGIN_SPEC))
3914 return 0;
3917 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
3920 /* Print some information about block BB, which starts with HEAD and
3921 ends with TAIL, before scheduling it.
3922 I is zero if the scheduler is about to start with a fresh ebb. */
3923 static void
3924 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
3926 if (!i)
3927 fprintf (sched_dump,
3928 ";; ======================================================\n");
3929 else
3930 fprintf (sched_dump,
3931 ";; =====================ADVANCING TO=====================\n");
3932 fprintf (sched_dump,
3933 ";; -- basic block %d from %d to %d -- %s reload\n",
3934 bb->index, INSN_UID (head), INSN_UID (tail),
3935 (reload_completed ? "after" : "before"));
3936 fprintf (sched_dump,
3937 ";; ======================================================\n");
3938 fprintf (sched_dump, "\n");
3941 /* Unlink basic block notes and labels and save them, so they
3942 can be easily restored. We unlink basic block notes in the EBB to
3943 provide backward compatibility with the previous code, as target backends
3944 assume that there'll be only instructions between
3945 current_sched_info->{head and tail}. We restore these notes as soon
3946 as we can.
3947 FIRST (LAST) is the first (last) basic block in the ebb.
3948 NB: In the usual case (FIRST == LAST) nothing is really done. */
3949 void
3950 unlink_bb_notes (basic_block first, basic_block last)
3952 /* We DON'T unlink basic block notes of the first block in the ebb. */
3953 if (first == last)
3954 return;
3956 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
3958 /* Make a sentinel. */
3959 if (last->next_bb != EXIT_BLOCK_PTR)
3960 bb_header[last->next_bb->index] = 0;
3962 first = first->next_bb;
3965 rtx prev, label, note, next;
3967 label = BB_HEAD (last);
3968 if (LABEL_P (label))
3969 note = NEXT_INSN (label);
3970 else
3971 note = label;
3972 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3974 prev = PREV_INSN (label);
3975 next = NEXT_INSN (note);
3976 gcc_assert (prev && next);
3978 NEXT_INSN (prev) = next;
3979 PREV_INSN (next) = prev;
3981 bb_header[last->index] = label;
3983 if (last == first)
3984 break;
3986 last = last->prev_bb;
3988 while (1);
3991 /* Restore basic block notes.
3992 FIRST is the first basic block in the ebb. */
3993 static void
3994 restore_bb_notes (basic_block first)
3996 if (!bb_header)
3997 return;
3999 /* We DON'T unlink basic block notes of the first block in the ebb. */
4000 first = first->next_bb;
4001 /* Remember: FIRST is actually the second basic block in the ebb. */
4003 while (first != EXIT_BLOCK_PTR
4004 && bb_header[first->index])
4006 rtx prev, label, note, next;
4008 label = bb_header[first->index];
4009 prev = PREV_INSN (label);
4010 next = NEXT_INSN (prev);
4012 if (LABEL_P (label))
4013 note = NEXT_INSN (label);
4014 else
4015 note = label;
4016 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4018 bb_header[first->index] = 0;
4020 NEXT_INSN (prev) = label;
4021 NEXT_INSN (note) = next;
4022 PREV_INSN (next) = note;
4024 first = first->next_bb;
4027 free (bb_header);
4028 bb_header = 0;
4031 /* Extend the per-basic-block data structures of the scheduler so that
4032 they cover all current basic blocks. */
4034 static void
4035 extend_bb (void)
4037 rtx insn;
4039 old_last_basic_block = last_basic_block;
4041 if (current_sched_info->flags & USE_GLAT)
4043 glat_start = xrealloc (glat_start,
4044 last_basic_block * sizeof (*glat_start));
4045 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4048 /* The following is done to keep current_sched_info->next_tail non-null. */
4050 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4051 if (NEXT_INSN (insn) == 0
4052 || (!NOTE_P (insn)
4053 && !LABEL_P (insn)
4054 /* Don't emit a NOTE if it would end up before a BARRIER. */
4055 && !BARRIER_P (NEXT_INSN (insn))))
4057 emit_note_after (NOTE_INSN_DELETED, insn);
4058 /* Make the new note appear outside the BB. */
4059 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4063 /* Add a basic block BB to extended basic block EBB.
4064 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4065 If EBB is NULL, then BB should be a new region. */
4066 void
4067 add_block (basic_block bb, basic_block ebb)
4069 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4070 && bb->il.rtl->global_live_at_start == 0
4071 && bb->il.rtl->global_live_at_end == 0);
4073 extend_bb ();
4075 glat_start[bb->index] = 0;
4076 glat_end[bb->index] = 0;
4078 if (current_sched_info->add_block)
4079 /* This changes only data structures of the front-end. */
4080 current_sched_info->add_block (bb, ebb);
/* Helper function.
   Fix the CFG after both in- and inter-block movement of a
   control_flow_insn_p JUMP.  */
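/* A rough picture, as a reading aid: JUMP has been moved into the middle
   of BB's insns while BLOCK_FOR_INSN (jump) still says JUMP_BB, so the
   BB_END markers of BB, JUMP_BB and JUMP_BB_NEXT no longer match the insn
   stream; the assignments below restore the invariant that every insn
   lies between its block's BB_HEAD and BB_END.  */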
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (current_sched_info->flags & SCHED_EBB
              || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* Then jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       in jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To keep the assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix the CFG after interblock movement of a control_flow_insn_p JUMP.  */
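/* What happens below, in one line: the block layout  bb, jump_bb,
   jump_bb_next  is rotated into  bb, jump_bb_next, jump_bb,  and the
   successor edge vectors of the three blocks are rotated along with
   them by the move_succs () calls.  */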
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
              || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  if (current_sched_info->fix_recovery_cfg)
    current_sched_info->fix_recovery_cfg
      (bb->index, jump_bb->index, jump_bb_next->index);
}
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}
/* Initialize GLAT (global_live_at_{start, end}) structures.
   GLAT structures are used to substitute for global_live_at_{start, end}
   regsets during scheduling.  This is necessary to use such functions as
   split_block (), as they assume consistency of register live information.  */
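/* A simplified picture of the regset hand-off under DETACH_LIFE_INFO:

     init_glat1 ():        glat_{start,end}[bb->index] take over
                           bb->il.rtl->global_live_at_{start,end}, and the
                           block's own pointers are cleared;
     attach_life_info1 (): ownership is handed back to the block.

   In between, the scheduler is free to reshape the CFG without the live
   regsets getting out of sync behind its back.  */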
static void
init_glat (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    init_glat1 (bb);
}

/* Helper function for init_glat.  */
static void
init_glat1 (basic_block bb)
{
  gcc_assert (bb->il.rtl->global_live_at_start != 0
              && bb->il.rtl->global_live_at_end != 0);

  glat_start[bb->index] = bb->il.rtl->global_live_at_start;
  glat_end[bb->index] = bb->il.rtl->global_live_at_end;

  if (current_sched_info->flags & DETACH_LIFE_INFO)
    {
      bb->il.rtl->global_live_at_start = 0;
      bb->il.rtl->global_live_at_end = 0;
    }
}
/* Attach reg_live_info back to basic blocks.
   Also save regsets that should not have been changed during scheduling,
   for checking purposes (see check_reg_live).  */
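/* Note that under ENABLE_CHECKING, copies of the regsets of region
   boundary blocks are also stashed back into glat_{start,end}, so that
   check_reg_live () can later compare them against the recomputed live
   information.  */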
void
attach_life_info (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    attach_life_info1 (bb);
}

/* Helper function for attach_life_info.  */
static void
attach_life_info1 (basic_block bb)
{
  gcc_assert (bb->il.rtl->global_live_at_start == 0
              && bb->il.rtl->global_live_at_end == 0);

  if (glat_start[bb->index])
    {
      gcc_assert (glat_end[bb->index]);

      bb->il.rtl->global_live_at_start = glat_start[bb->index];
      bb->il.rtl->global_live_at_end = glat_end[bb->index];

      /* Make them NULL, so they won't be freed in free_glat.  */
      glat_start[bb->index] = 0;
      glat_end[bb->index] = 0;

#ifdef ENABLE_CHECKING
      if (bb->index < NUM_FIXED_BLOCKS
          || current_sched_info->region_head_or_leaf_p (bb, 0))
        {
          glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
          COPY_REG_SET (glat_start[bb->index],
                        bb->il.rtl->global_live_at_start);
        }

      if (bb->index < NUM_FIXED_BLOCKS
          || current_sched_info->region_head_or_leaf_p (bb, 1))
        {
          glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
          COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
        }
#endif
    }
  else
    {
      gcc_assert (!glat_end[bb->index]);

      bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
      bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
    }
}
/* Free GLAT information.  */
static void
free_glat (void)
{
#ifdef ENABLE_CHECKING
  if (current_sched_info->flags & DETACH_LIFE_INFO)
    {
      basic_block bb;

      FOR_ALL_BB (bb)
        {
          if (glat_start[bb->index])
            FREE_REG_SET (glat_start[bb->index]);
          if (glat_end[bb->index])
            FREE_REG_SET (glat_end[bb->index]);
        }
    }
#endif

  free (glat_start);
  free (glat_end);
}
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
/* Clear priorities of all instructions that are
   forward dependent on INSN.  */
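/* Note that the walk below follows INSN_BACK_DEPS, i.e. it visits the
   producers of INSN: a producer's priority is derived from its consumers,
   so it is exactly the (transitive) producers whose cached priorities can
   become stale when INSN changes.  */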
static void
clear_priorities (rtx insn)
{
  dep_link_t link;

  FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
    {
      rtx pro = DEP_LINK_PRO (link);

      if (INSN_PRIORITY_KNOWN (pro))
        {
          INSN_PRIORITY_KNOWN (pro) = 0;
          clear_priorities (pro);
        }
    }
}
/* Recompute priorities of instructions whose priorities might have been
   changed due to changes in INSN.  */
static void
calc_priorities (rtx insn)
{
  dep_link_t link;

  FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
    {
      rtx pro = DEP_LINK_PRO (link);

      if (!INSN_PRIORITY_KNOWN (pro))
        {
          priority (pro);
          calc_priorities (pro);
        }
    }
}
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
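/* In effect, every insn of the recovery block that has no forward
   dependence yet becomes anti-dependent on JUMP, e.g.

       insn_1 --anti--> JUMP
       insn_2 --anti--> JUMP    (insn_2 had no other consumers)

   which guarantees that JUMP cannot be scheduled before the rest of
   the block.  */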
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (deps_list_empty_p (INSN_FORW_DEPS (insn)))
        add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
    }
  while (1);

  gcc_assert (!deps_list_empty_p (INSN_BACK_DEPS (jump)));
}
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}
#ifdef ENABLE_CHECKING
extern void debug_spec_status (ds_t);

/* Dump information about the dependence status S.  */
void
debug_spec_status (ds_t s)
{
  FILE *f = stderr;

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "\n");
}
/* Helper function for check_cfg.
   Return nonzero if the edge vector pointed to by EL has an edge with
   TYPE in its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}
/* Check a few properties of the CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
   instruction stream.  */
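/* Among the invariants verified below: the NEXT_INSN/PREV_INSN links are
   consistent, every insn inside a block maps back to it via
   BLOCK_FOR_INSN, an unconditional jump ends its block and is followed by
   a barrier, and a conditional jump either has more than one successor or
   merely jumps to the next instruction.  */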
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
        gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
        gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
          || (NOTE_INSN_BASIC_BLOCK_P (head)
              && (!not_first
                  || (not_first && !LABEL_P (PREV_INSN (head))))))
        {
          gcc_assert (bb == 0);
          bb = BLOCK_FOR_INSN (head);
          if (bb != 0)
            gcc_assert (BB_HEAD (bb) == head);
          else
            /* This is the case of a jump table.  See inside_basic_block_p ().  */
            gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
        }

      if (bb == 0)
        {
          gcc_assert (!inside_basic_block_p (head));
          head = NEXT_INSN (head);
        }
      else
        {
          gcc_assert (inside_basic_block_p (head)
                      || NOTE_P (head));
          gcc_assert (BLOCK_FOR_INSN (head) == bb);

          if (LABEL_P (head))
            {
              head = NEXT_INSN (head);
              gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
            }
          else
            {
              if (control_flow_insn_p (head))
                {
                  gcc_assert (BB_END (bb) == head);

                  if (any_uncondjump_p (head))
                    gcc_assert (EDGE_COUNT (bb->succs) == 1
                                && BARRIER_P (NEXT_INSN (head)));
                  else if (any_condjump_p (head))
                    gcc_assert (/* Usual case.  */
                                (EDGE_COUNT (bb->succs) > 1
                                 && !BARRIER_P (NEXT_INSN (head)))
                                /* Or jump to the next instruction.  */
                                || (EDGE_COUNT (bb->succs) == 1
                                    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
                                        == JUMP_LABEL (head))));
                }
              if (BB_END (bb) == head)
                {
                  if (EDGE_COUNT (bb->succs) > 1)
                    gcc_assert (control_flow_insn_p (head)
                                || has_edge_p (bb->succs, EDGE_COMPLEX));
                  bb = 0;
                }

              head = NEXT_INSN (head);
            }
        }

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}
/* Perform a few consistency checks of flags in different data structures.  */
static void
check_sched_flags (void)
{
  unsigned int f = current_sched_info->flags;

  if (flag_sched_stalled_insns)
    gcc_assert (!(f & DO_SPECULATION));
  if (f & DO_SPECULATION)
    gcc_assert (!flag_sched_stalled_insns
                && (f & DETACH_LIFE_INFO)
                && spec_info
                && spec_info->mask);
  if (f & DETACH_LIFE_INFO)
    gcc_assert (f & USE_GLAT);
}
/* Check global_live_at_{start, end} regsets.
   If FATAL_P is TRUE, then abort execution at the first failure.
   Otherwise, print diagnostics to STDERR (this mode is for calling
   from a debugger).  */
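/* For example, evaluating "call check_reg_live (false)" from a debugger
   prints a per-block report on stderr instead of stopping at the first
   mismatch.  */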
void
check_reg_live (bool fatal_p)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      int i;

      i = bb->index;

      if (glat_start[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
                                   glat_start[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
            }
        }

      if (glat_end[i])
        {
          bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
                                   glat_end[i]);

          if (!b)
            {
              gcc_assert (!fatal_p);

              fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
            }
        }
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */