gcc/haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
22 02110-1301, USA. */
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The entry point for the normal
26 instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe; register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
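   For example, in

       insn 1:  r1 = r2 + r3
       insn 2:  r4 = r1 * 2
       insn 3:  r1 = r5 - 1

   insn 2 has a true (flow) dependence on insn 1, and insn 3 has an
   anti dependence on insn 2 and an output dependence on insn 1, so
   none of the three may be reordered with the others.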
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
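   Roughly: an insn with no forward dependences gets its own cost as
   its priority; otherwise

       priority (insn) = MAX over forward deps N of
                         (cost (insn, N) + priority (N))

   i.e. an estimate of the longest (critical) path from the insn to
   the end of the block (see priority () below).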
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots. (A simplified sketch of this loop follows this comment.)
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block over interblock motion, ties broken by
65 4. prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7. choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using LOG_LINKS backward
86 dependences. LOG_LINKS are translated into INSN_DEPEND forward
87 dependences for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live register constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
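
/* A much-simplified sketch of the list-scheduling loop described
   above, for orientation only; the real loop lives in
   schedule_block () in this file.  All helper names suffixed with
   `_sketch' are hypothetical stand-ins for the real routines, and
   `ready'/`queue' stand for the R and Q structures documented
   further down.  Kept under #if 0 as documentation only.  */
#if 0
static void
list_schedule_sketch (void)
{
  while (unscheduled_insns_remain_p ())
    {
      /* Q->R: move insns whose stall time has elapsed, advancing the
	 clock until something becomes ready.  */
      queue_to_ready_sketch ();

      /* Order R by the tie-breaking heuristics listed above.  */
      ready_sort_sketch ();

      while (!ready_empty_p ())
	{
	  rtx insn = ready_remove_first_sketch ();

	  /* R->S: commit the insn to the schedule; resolving its
	     dependences may move other insns P->R or P->Q.  */
	  commit_and_resolve_deps_sketch (insn);
	}

      advance_one_cycle ();
    }
}
#endif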
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "toplev.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
148 #ifdef INSN_SCHEDULING
150 /* issue_rate is the number of insns that can be scheduled in the same
151 machine cycle. It can be defined in the config/mach/mach.h file,
152 otherwise we set it to 1. */
154 static int issue_rate;
156 /* sched-verbose controls the amount of debugging output the
157 scheduler prints. It is controlled by -fsched-verbose=N:
158 N>0 and no -dSR : the output is directed to stderr.
159 N>=10 will direct the printouts to stderr (regardless of -dSR).
160 N=1: same as -dSR.
161 N=2: bb's probabilities, detailed ready list info, unit/insn info.
162 N=3: rtl at abort point, control-flow, regions info.
163 N=5: dependences info. */
165 static int sched_verbose_param = 0;
166 int sched_verbose = 0;
168 /* Debugging file. All printouts are sent to sched_dump, which is
169 always set, either to stderr, or to the dump listing file (-dSR). */
170 FILE *sched_dump = 0;
172 /* Highest uid before scheduling. */
173 static int old_max_uid;
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
178 void
179 fix_sched_param (const char *param, const char *val)
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
183 else
184 warning (0, "fix_sched_param: unknown param: %s", param);
187 struct haifa_insn_data *h_i_d;
189 #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
190 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
191 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
193 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
194 then it should be recalculated from scratch. */
195 #define INVALID_TICK (-(max_insn_queue_index + 1))
196 /* The minimal value of the INSN_TICK of an instruction. */
197 #define MIN_TICK (-max_insn_queue_index)
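/* For example, with max_insn_queue_index == 63, INVALID_TICK == -64
   and MIN_TICK == -63.  */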
199 /* Issue points are used to distinguish between instructions in max_issue ().
200 For now, all instructions are equally good. */
201 #define ISSUE_POINTS(INSN) 1
203 /* Vector indexed by basic block number giving the starting line-number
204 for each basic block. */
205 static rtx *line_note_head;
207 /* List of important notes we must keep around. This is a pointer to the
208 last element in the list. */
209 static rtx note_list;
211 static struct spec_info_def spec_info_var;
212 /* Description of the speculative part of the scheduling.
213 If NULL - no speculation. */
214 static spec_info_t spec_info;
216 /* True if a recovery block was added during scheduling of the current
217 block. Used to determine if we need to fix INSN_TICKs. */
218 static bool added_recovery_block_p;
220 /* Counters of different types of speculative instructions. */
221 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
223 /* Pointers to GLAT data. See init_glat for more information. */
224 regset *glat_start, *glat_end;
226 /* Array used in {unlink, restore}_bb_notes. */
227 static rtx *bb_header = 0;
229 /* Number of basic_blocks. */
230 static int old_last_basic_block;
232 /* Basic block after which recovery blocks will be created. */
233 static basic_block before_recovery;
235 /* Queues, etc. */
237 /* An instruction is ready to be scheduled when all insns preceding it
238 have already been scheduled. It is important to ensure that all
239 insns which use its result will not be executed until its result
240 has been computed. An insn is maintained in one of four structures:
242 (P) the "Pending" set of insns which cannot be scheduled until
243 their dependencies have been satisfied.
244 (Q) the "Queued" set of insns that can be scheduled when sufficient
245 time has passed.
246 (R) the "Ready" list of unscheduled, uncommitted insns.
247 (S) the "Scheduled" list of insns.
249 Initially, all insns are either "Pending" or "Ready" depending on
250 whether their dependencies are satisfied.
252 Insns move from the "Ready" list to the "Scheduled" list as they
253 are committed to the schedule. As this occurs, the insns in the
254 "Pending" list have their dependencies satisfied and move to either
255 the "Ready" list or the "Queued" set depending on whether
256 sufficient time has passed to make them ready. As time passes,
257 insns move from the "Queued" set to the "Ready" list.
259 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
260 insns, i.e., those that are ready, queued, and pending.
261 The "Queued" set (Q) is implemented by the variable `insn_queue'.
262 The "Ready" list (R) is implemented by the variables `ready' and
263 `n_ready'.
264 The "Scheduled" list (S) is the new insn chain built by this pass.
266 The transition (R->S) is implemented in the scheduling loop in
267 `schedule_block' when the best insn to schedule is chosen.
268 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
269 insns move from the ready list to the scheduled list.
270 The transition (Q->R) is implemented in 'queue_to_ready' as time
271 passes or stalls are introduced. */
273 /* Implement a circular buffer to delay instructions until sufficient
274 time has passed. For the new pipeline description interface,
275 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
276 than the maximal instruction execution time computed by genattr.c from
277 the maximal time of functional unit reservations needed to produce a
278 result. This is the longest time an insn may be queued. */
280 static rtx *insn_queue;
281 static int q_ptr = 0;
282 static int q_size = 0;
283 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
284 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
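/* Because max_insn_queue_index is a power of two minus one, the
   masking above wraps the index: e.g. with max_insn_queue_index == 63,
   NEXT_Q_AFTER (62, 5) == (62 + 5) & 63 == 3.  */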
286 #define QUEUE_SCHEDULED (-3)
287 #define QUEUE_NOWHERE (-2)
288 #define QUEUE_READY (-1)
289 /* QUEUE_SCHEDULED - INSN is scheduled.
290 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in the
291 queue nor on the ready list.
292 QUEUE_READY - INSN is in ready list.
293 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
295 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
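
/* A minimal illustrative mapping (not used by the pass) from the
   QUEUE_INDEX encodings above back to the P/Q/R/S states described
   earlier; kept under #if 0 as documentation only.  */
#if 0
static const char *
queue_index_state (rtx insn)
{
  int qi = QUEUE_INDEX (insn);

  if (qi == QUEUE_SCHEDULED)
    return "S: already scheduled";
  if (qi == QUEUE_READY)
    return "R: on the ready list";
  if (qi >= 0)
    return "Q: queued until its stall time elapses";
  return "P/nowhere: dependencies not all resolved";
}
#endif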
297 /* The following variable describes all current and future
298 reservations of the processor units. */
299 state_t curr_state;
301 /* The following variable is the size of the memory representing all
302 current and future reservations of the processor units. */
303 static size_t dfa_state_size;
305 /* The following array is used to find the best insn from the ready
306 list when the automaton pipeline interface is used. */
307 static char *ready_try;
309 /* Describe the ready list of the scheduler.
310 VEC holds space enough for all insns in the current region. VECLEN
311 says how many exactly.
312 FIRST is the index of the element with the highest priority; i.e. the
313 last one in the ready list, since elements are ordered by ascending
314 priority.
315 N_READY determines how many insns are on the ready list. */
317 struct ready_list
319 rtx *vec;
320 int veclen;
321 int first;
322 int n_ready;
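
/* For example, with veclen == 8, first == 6 and n_ready == 3, the
   ready insns occupy vec[4..6]; vec[6] has the highest priority and
   is the one ready_remove_first () returns.  */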
325 /* The pointer to the ready list. */
326 static struct ready_list *readyp;
328 /* Scheduling clock. */
329 static int clock_var;
331 /* Number of instructions in current scheduling region. */
332 static int rgn_n_insns;
334 static int may_trap_exp (rtx, int);
336 /* Nonzero iff the address is computed from at most one register. */
337 #define CONST_BASED_ADDRESS_P(x) \
338 (REG_P (x) \
339 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
340 || (GET_CODE (x) == LO_SUM)) \
341 && (CONSTANT_P (XEXP (x, 0)) \
342 || CONSTANT_P (XEXP (x, 1)))))
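
/* E.g. (reg), (plus (reg) (const_int 4)) and (lo_sum (reg)
   (symbol_ref)) satisfy this; (plus (reg) (reg)) does not.  */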
344 /* Returns the class that an insn with GET_DEST(insn)=x may belong to,
345 as found by analyzing the insn's expression. */
347 static int
348 may_trap_exp (rtx x, int is_store)
350 enum rtx_code code;
352 if (x == 0)
353 return TRAP_FREE;
354 code = GET_CODE (x);
355 if (is_store)
357 if (code == MEM && may_trap_p (x))
358 return TRAP_RISKY;
359 else
360 return TRAP_FREE;
362 if (code == MEM)
364 /* The insn uses memory: a volatile load. */
365 if (MEM_VOLATILE_P (x))
366 return IRISKY;
367 /* An exception-free load. */
368 if (!may_trap_p (x))
369 return IFREE;
370 /* A load with 1 base register, to be further checked. */
371 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
372 return PFREE_CANDIDATE;
373 /* No info on the load, to be further checked. */
374 return PRISKY_CANDIDATE;
376 else
378 const char *fmt;
379 int i, insn_class = TRAP_FREE;
381 /* Neither store nor load, check if it may cause a trap. */
382 if (may_trap_p (x))
383 return TRAP_RISKY;
384 /* Recursive step: walk the insn... */
385 fmt = GET_RTX_FORMAT (code);
386 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
388 if (fmt[i] == 'e')
390 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
391 insn_class = WORST_CLASS (insn_class, tmp_class);
393 else if (fmt[i] == 'E')
395 int j;
396 for (j = 0; j < XVECLEN (x, i); j++)
398 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
399 insn_class = WORST_CLASS (insn_class, tmp_class);
400 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
401 break;
404 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
405 break;
407 return insn_class;
411 /* Classifies an insn for the purpose of verifying that it can be
412 moved speculatively, by examining its pattern, returning:
413 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
414 TRAP_FREE: non-load insn.
415 IFREE: load from a globally safe location.
416 IRISKY: volatile load.
417 PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
418 being either PFREE or PRISKY. */
420 int
421 haifa_classify_insn (rtx insn)
423 rtx pat = PATTERN (insn);
424 int tmp_class = TRAP_FREE;
425 int insn_class = TRAP_FREE;
426 enum rtx_code code;
428 if (GET_CODE (pat) == PARALLEL)
430 int i, len = XVECLEN (pat, 0);
432 for (i = len - 1; i >= 0; i--)
434 code = GET_CODE (XVECEXP (pat, 0, i));
435 switch (code)
437 case CLOBBER:
438 /* Test if it is a 'store'. */
439 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
440 break;
441 case SET:
442 /* Test if it is a store. */
443 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
444 if (tmp_class == TRAP_RISKY)
445 break;
446 /* Test if it is a load. */
447 tmp_class
448 = WORST_CLASS (tmp_class,
449 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
450 0));
451 break;
452 case COND_EXEC:
453 case TRAP_IF:
454 tmp_class = TRAP_RISKY;
455 break;
456 default:
459 insn_class = WORST_CLASS (insn_class, tmp_class);
460 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
461 break;
464 else
466 code = GET_CODE (pat);
467 switch (code)
469 case CLOBBER:
470 /* Test if it is a 'store'. */
471 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
472 break;
473 case SET:
474 /* Test if it is a store. */
475 tmp_class = may_trap_exp (SET_DEST (pat), 1);
476 if (tmp_class == TRAP_RISKY)
477 break;
478 /* Test if it is a load. */
479 tmp_class =
480 WORST_CLASS (tmp_class,
481 may_trap_exp (SET_SRC (pat), 0));
482 break;
483 case COND_EXEC:
484 case TRAP_IF:
485 tmp_class = TRAP_RISKY;
486 break;
487 default:;
489 insn_class = tmp_class;
492 return insn_class;
495 /* Forward declarations. */
497 HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
498 static int priority (rtx);
499 static int rank_for_schedule (const void *, const void *);
500 static void swap_sort (rtx *, int);
501 static void queue_insn (rtx, int);
502 static int schedule_insn (rtx);
503 static int find_set_reg_weight (rtx);
504 static void find_insn_reg_weight (basic_block);
505 static void find_insn_reg_weight1 (rtx);
506 static void adjust_priority (rtx);
507 static void advance_one_cycle (void);
509 /* Notes handling mechanism:
510 =========================
511 Generally, NOTES are saved before scheduling and restored after scheduling.
512 The scheduler distinguishes between three types of notes:
514 (1) LINE_NUMBER notes, generated and used for debugging. Here,
515 before scheduling a region, a pointer to the LINE_NUMBER note is
516 added to the insn following it (in save_line_notes()), and the note
517 is removed (in rm_line_notes() and unlink_line_notes()). After
518 scheduling the region, this pointer is used for regeneration of
519 the LINE_NUMBER note (in restore_line_notes()).
521 (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
522 Before scheduling a region, a pointer to the note is added to the insn
523 that follows or precedes it. (This happens as part of the data dependence
524 computation). After scheduling an insn, the pointer contained in it is
525 used for regenerating the corresponding note (in reemit_notes).
527 (3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
528 these notes are put in a list (in rm_other_notes() and
529 unlink_other_notes ()). After scheduling the block, these notes are
530 inserted at the beginning of the block (in schedule_block()). */
532 static rtx unlink_other_notes (rtx, rtx);
533 static rtx unlink_line_notes (rtx, rtx);
534 static void reemit_notes (rtx);
536 static rtx *ready_lastpos (struct ready_list *);
537 static void ready_add (struct ready_list *, rtx, bool);
538 static void ready_sort (struct ready_list *);
539 static rtx ready_remove_first (struct ready_list *);
541 static void queue_to_ready (struct ready_list *);
542 static int early_queue_to_ready (state_t, struct ready_list *);
544 static void debug_ready_list (struct ready_list *);
546 static void move_insn (rtx);
548 /* The following functions are used to implement multi-pass scheduling
549 on the first cycle. */
550 static rtx ready_element (struct ready_list *, int);
551 static rtx ready_remove (struct ready_list *, int);
552 static void ready_remove_insn (rtx);
553 static int max_issue (struct ready_list *, int *, int);
555 static rtx choose_ready (struct ready_list *);
557 static void fix_inter_tick (rtx, rtx);
558 static int fix_tick_ready (rtx);
559 static void change_queue_index (rtx, int);
560 static void resolve_dep (rtx, rtx);
562 /* The following functions are used to implement scheduling of data/control
563 speculative instructions. */
565 static void extend_h_i_d (void);
566 static void extend_ready (int);
567 static void extend_global (rtx);
568 static void extend_all (rtx);
569 static void init_h_i_d (rtx);
570 static void generate_recovery_code (rtx);
571 static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
572 static void begin_speculative_block (rtx);
573 static void add_to_speculative_block (rtx);
574 static dw_t dep_weak (ds_t);
575 static edge find_fallthru_edge (basic_block);
576 static void init_before_recovery (void);
577 static basic_block create_recovery_block (void);
578 static void create_check_block_twin (rtx, bool);
579 static void fix_recovery_deps (basic_block);
580 static void associate_line_notes_with_blocks (basic_block);
581 static void change_pattern (rtx, rtx);
582 static int speculate_insn (rtx, ds_t, rtx *);
583 static void dump_new_block_header (int, basic_block, rtx, rtx);
584 static void restore_bb_notes (basic_block);
585 static void extend_bb (basic_block);
586 static void fix_jump_move (rtx);
587 static void move_block_after_check (rtx);
588 static void move_succs (VEC(edge,gc) **, basic_block);
589 static void init_glat (void);
590 static void init_glat1 (basic_block);
591 static void attach_life_info1 (basic_block);
592 static void free_glat (void);
593 static void sched_remove_insn (rtx);
594 static void clear_priorities (rtx);
595 static void add_jump_dependencies (rtx, rtx);
596 static void calc_priorities (rtx);
597 #ifdef ENABLE_CHECKING
598 static int has_edge_p (VEC(edge,gc) *, int);
599 static void check_cfg (rtx, rtx);
600 static void check_sched_flags (void);
601 #endif
603 #endif /* INSN_SCHEDULING */
605 /* Point to state used for the current scheduling pass. */
606 struct sched_info *current_sched_info;
608 #ifndef INSN_SCHEDULING
609 void
610 schedule_insns (void)
613 #else
615 /* Working copy of frontend's sched_info variable. */
616 static struct sched_info current_sched_info_var;
618 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
619 so that insns independent of the last scheduled insn will be preferred
620 over dependent instructions. */
622 static rtx last_scheduled_insn;
624 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
625 This is the number of cycles between instruction issue and
626 instruction results. */
628 HAIFA_INLINE int
629 insn_cost (rtx insn, rtx link, rtx used)
631 return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
632 link, used);
635 /* Compute cost of executing INSN given the dependence on the insn USED.
636 If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
637 Otherwise, dependence between INSN and USED is assumed to be of type
638 DEP_TYPE. This function was introduced as a workaround for the
639 targetm.sched.adjust_cost hook.
640 This is the number of cycles between instruction issue and
641 instruction results. */
643 HAIFA_INLINE static int
644 insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
646 int cost = INSN_COST (insn);
648 if (cost < 0)
650 /* A USE insn, or something else we don't need to
651 understand. We can't pass these directly to
652 result_ready_cost or insn_default_latency because it will
653 trigger a fatal error for unrecognizable insns. */
654 if (recog_memoized (insn) < 0)
656 INSN_COST (insn) = 0;
657 return 0;
659 else
661 cost = insn_default_latency (insn);
662 if (cost < 0)
663 cost = 0;
665 INSN_COST (insn) = cost;
669 /* In this case estimate cost without caring how insn is used. */
670 if (used == 0)
671 return cost;
673 /* A USE insn should never require the value used to be computed.
674 This allows the computation of a function's result and parameter
675 values to overlap the return and call. */
676 if (recog_memoized (used) < 0)
677 cost = 0;
678 else
680 gcc_assert (!link || dep_type == REG_NOTE_KIND (link));
682 if (INSN_CODE (insn) >= 0)
684 if (dep_type == REG_DEP_ANTI)
685 cost = 0;
686 else if (dep_type == REG_DEP_OUTPUT)
688 cost = (insn_default_latency (insn)
689 - insn_default_latency (used));
690 if (cost <= 0)
691 cost = 1;
693 else if (bypass_p (insn))
694 cost = insn_latency (insn, used);
697 if (targetm.sched.adjust_cost_2)
698 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
699 else
701 gcc_assert (link);
702 if (targetm.sched.adjust_cost)
703 cost = targetm.sched.adjust_cost (used, link, insn, cost);
706 if (cost < 0)
707 cost = 0;
710 return cost;
713 /* Compute the priority number for INSN. */
715 static int
716 priority (rtx insn)
718 rtx link;
720 if (! INSN_P (insn))
721 return 0;
723 if (! INSN_PRIORITY_KNOWN (insn))
725 int this_priority = 0;
727 if (INSN_DEPEND (insn) == 0)
728 this_priority = insn_cost (insn, 0, 0);
729 else
731 rtx prev_first, twin;
732 basic_block rec;
734 /* For recovery check instructions we calculate priority slightly
735 differently than for normal instructions. Instead of walking through
736 the INSN_DEPEND (check) list, we walk through the INSN_DEPEND list
737 of each instruction in the corresponding recovery block. */
739 rec = RECOVERY_BLOCK (insn);
740 if (!rec || rec == EXIT_BLOCK_PTR)
742 prev_first = PREV_INSN (insn);
743 twin = insn;
745 else
747 prev_first = NEXT_INSN (BB_HEAD (rec));
748 twin = PREV_INSN (BB_END (rec));
753 for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
755 rtx next;
756 int next_priority;
758 next = XEXP (link, 0);
760 if (BLOCK_FOR_INSN (next) != rec)
762 /* Critical path is meaningful in block boundaries
763 only. */
764 if (! (*current_sched_info->contributes_to_priority)
765 (next, insn)
766 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
767 then speculative instructions will less likely be
768 scheduled. That is because the priority of
769 their producers will increase, and, thus, the
770 producers will more likely be scheduled, thus,
771 resolving the dependence. */
772 || ((current_sched_info->flags & DO_SPECULATION)
773 && (DEP_STATUS (link) & SPECULATIVE)
774 && !(spec_info->flags
775 & COUNT_SPEC_IN_CRITICAL_PATH)))
776 continue;
778 next_priority = insn_cost1 (insn,
779 twin == insn ?
780 REG_NOTE_KIND (link) :
781 REG_DEP_ANTI,
782 twin == insn ? link : 0,
783 next) + priority (next);
785 if (next_priority > this_priority)
786 this_priority = next_priority;
790 twin = PREV_INSN (twin);
792 while (twin != prev_first);
794 INSN_PRIORITY (insn) = this_priority;
795 INSN_PRIORITY_KNOWN (insn) = 1;
798 return INSN_PRIORITY (insn);
801 /* Macros and functions for keeping the priority queue sorted, and
802 dealing with queuing and dequeuing of instructions. */
804 #define SCHED_SORT(READY, N_READY) \
805 do { if ((N_READY) == 2) \
806 swap_sort (READY, N_READY); \
807 else if ((N_READY) > 2) \
808 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
809 while (0)
811 /* Returns a positive value if x is preferred; returns a negative value if
812 y is preferred. Should never return 0, since that will make the sort
813 unstable. */
815 static int
816 rank_for_schedule (const void *x, const void *y)
818 rtx tmp = *(const rtx *) y;
819 rtx tmp2 = *(const rtx *) x;
820 rtx link;
821 int tmp_class, tmp2_class, depend_count1, depend_count2;
822 int val, priority_val, weight_val, info_val;
824 /* An insn in a schedule group should be issued first. */
825 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
826 return SCHED_GROUP_P (tmp2) ? 1 : -1;
828 /* Prefer insn with higher priority. */
829 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
831 if (priority_val)
832 return priority_val;
834 /* Prefer a speculative insn with greater dependence weakness. */
835 if (spec_info)
837 ds_t ds1, ds2;
838 dw_t dw1, dw2;
839 int dw;
841 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
842 if (ds1)
843 dw1 = dep_weak (ds1);
844 else
845 dw1 = NO_DEP_WEAK;
847 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
848 if (ds2)
849 dw2 = dep_weak (ds2);
850 else
851 dw2 = NO_DEP_WEAK;
853 dw = dw2 - dw1;
854 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
855 return dw;
858 /* Prefer an insn with a smaller contribution to register pressure. */
859 if (!reload_completed &&
860 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
861 return weight_val;
863 info_val = (*current_sched_info->rank) (tmp, tmp2);
864 if (info_val)
865 return info_val;
867 /* Compare insns based on their relation to the last-scheduled-insn. */
868 if (INSN_P (last_scheduled_insn))
870 /* Classify the instructions into three classes:
871 1) Data dependent on last scheduled insn.
872 2) Anti/Output dependent on last scheduled insn.
873 3) Independent of last scheduled insn, or has latency of one.
874 Choose the insn from the highest numbered class if different. */
875 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
876 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
877 tmp_class = 3;
878 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
879 tmp_class = 1;
880 else
881 tmp_class = 2;
883 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
884 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
885 tmp2_class = 3;
886 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
887 tmp2_class = 1;
888 else
889 tmp2_class = 2;
891 if ((val = tmp2_class - tmp_class))
892 return val;
895 /* Prefer the insn which has more later insns that depend on it.
896 This gives the scheduler more freedom when scheduling later
897 instructions at the expense of added register pressure. */
898 depend_count1 = 0;
899 for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
900 depend_count1++;
902 depend_count2 = 0;
903 for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
904 depend_count2++;
906 val = depend_count2 - depend_count1;
907 if (val)
908 return val;
910 /* If insns are equally good, sort by INSN_LUID (original insn order),
911 so that we make the sort stable. This minimizes instruction movement,
912 thus minimizing sched's effect on debugging and cross-jumping. */
913 return INSN_LUID (tmp) - INSN_LUID (tmp2);
916 /* Resort the array A in which only element at index N may be out of order. */
918 HAIFA_INLINE static void
919 swap_sort (rtx *a, int n)
921 rtx insn = a[n - 1];
922 int i = n - 2;
924 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
926 a[i + 1] = a[i];
927 i -= 1;
929 a[i + 1] = insn;
932 /* Add INSN to the insn queue so that it can be executed at least
933 N_CYCLES after the currently executing insn. Preserve insns
934 chain for debugging purposes. */
936 HAIFA_INLINE static void
937 queue_insn (rtx insn, int n_cycles)
939 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
940 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
942 gcc_assert (n_cycles <= max_insn_queue_index);
944 insn_queue[next_q] = link;
945 q_size += 1;
947 if (sched_verbose >= 2)
949 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
950 (*current_sched_info->print_insn) (insn, 0));
952 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
955 QUEUE_INDEX (insn) = next_q;
958 /* Remove INSN from queue. */
959 static void
960 queue_remove (rtx insn)
962 gcc_assert (QUEUE_INDEX (insn) >= 0);
963 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
964 q_size--;
965 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
968 /* Return a pointer to the bottom of the ready list, i.e. the insn
969 with the lowest priority. */
971 HAIFA_INLINE static rtx *
972 ready_lastpos (struct ready_list *ready)
974 gcc_assert (ready->n_ready >= 1);
975 return ready->vec + ready->first - ready->n_ready + 1;
978 /* Add an element INSN to the ready list so that it ends up with the
979 lowest/highest priority depending on FIRST_P. */
981 HAIFA_INLINE static void
982 ready_add (struct ready_list *ready, rtx insn, bool first_p)
984 if (!first_p)
986 if (ready->first == ready->n_ready)
988 memmove (ready->vec + ready->veclen - ready->n_ready,
989 ready_lastpos (ready),
990 ready->n_ready * sizeof (rtx));
991 ready->first = ready->veclen - 1;
993 ready->vec[ready->first - ready->n_ready] = insn;
995 else
997 if (ready->first == ready->veclen - 1)
999 if (ready->n_ready)
1000 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1001 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1002 ready_lastpos (ready),
1003 ready->n_ready * sizeof (rtx));
1004 ready->first = ready->veclen - 2;
1006 ready->vec[++(ready->first)] = insn;
1009 ready->n_ready++;
1011 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1012 QUEUE_INDEX (insn) = QUEUE_READY;
1015 /* Remove the element with the highest priority from the ready list and
1016 return it. */
1018 HAIFA_INLINE static rtx
1019 ready_remove_first (struct ready_list *ready)
1021 rtx t;
1023 gcc_assert (ready->n_ready);
1024 t = ready->vec[ready->first--];
1025 ready->n_ready--;
1026 /* If the queue becomes empty, reset it. */
1027 if (ready->n_ready == 0)
1028 ready->first = ready->veclen - 1;
1030 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1031 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1033 return t;
1036 /* The following code implements multi-pass scheduling for the first
1037 cycle. In other words, we try to choose the ready insn which
1038 permits starting the maximum number of insns on the same cycle. */
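
/* A much-simplified recursive sketch of that idea, for illustration
   only: try each ready insn on a copy of the DFA state and keep the
   choice that lets the most insns start on this cycle.  The real
   max_issue () uses an explicit choice stack (see choice_stack below)
   and weights insns by ISSUE_POINTS.  Kept under #if 0.  */
#if 0
static int
max_issue_sketch (state_t state, rtx *ready, int n_ready)
{
  int i, best = 0;
  state_t temp = alloca (dfa_state_size);

  for (i = 0; i < n_ready; i++)
    {
      rtx t;
      int n;

      memcpy (temp, state, dfa_state_size);
      /* state_transition () is negative iff the insn can be issued
	 in the given state.  */
      if (state_transition (temp, ready[i]) >= 0)
	continue;

      /* Issue ready[i], then count how many of the rest still fit.  */
      t = ready[i], ready[i] = ready[n_ready - 1], ready[n_ready - 1] = t;
      n = 1 + max_issue_sketch (temp, ready, n_ready - 1);
      ready[n_ready - 1] = ready[i], ready[i] = t;

      if (n > best)
	best = n;
    }

  return best;
}
#endif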
1040 /* Return a pointer to the element INDEX from the ready list. INDEX
1041 for the insn with the highest priority is 0, and the lowest priority
1042 insn has index N_READY - 1. */
1044 HAIFA_INLINE static rtx
1045 ready_element (struct ready_list *ready, int index)
1047 gcc_assert (ready->n_ready && index < ready->n_ready);
1049 return ready->vec[ready->first - index];
1052 /* Remove the element INDEX from the ready list and return it. INDEX
1053 for insn with the highest priority is 0, and the lowest priority
1054 has N_READY - 1. */
1056 HAIFA_INLINE static rtx
1057 ready_remove (struct ready_list *ready, int index)
1059 rtx t;
1060 int i;
1062 if (index == 0)
1063 return ready_remove_first (ready);
1064 gcc_assert (ready->n_ready && index < ready->n_ready);
1065 t = ready->vec[ready->first - index];
1066 ready->n_ready--;
1067 for (i = index; i < ready->n_ready; i++)
1068 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1069 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1070 return t;
1073 /* Remove INSN from the ready list. */
1074 static void
1075 ready_remove_insn (rtx insn)
1077 int i;
1079 for (i = 0; i < readyp->n_ready; i++)
1080 if (ready_element (readyp, i) == insn)
1082 ready_remove (readyp, i);
1083 return;
1085 gcc_unreachable ();
1088 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1089 macro. */
1091 HAIFA_INLINE static void
1092 ready_sort (struct ready_list *ready)
1094 rtx *first = ready_lastpos (ready);
1095 SCHED_SORT (first, ready->n_ready);
1098 /* PREV is an insn that is ready to execute. Adjust its priority if that
1099 will help shorten or lengthen register lifetimes as appropriate. Also
1100 provide a hook for the target to tweak itself. */
1102 HAIFA_INLINE static void
1103 adjust_priority (rtx prev)
1105 /* ??? There used to be code here to try and estimate how an insn
1106 affected register lifetimes, but it did it by looking at REG_DEAD
1107 notes, which we removed in schedule_region. Nor did it try to
1108 take into account register pressure or anything useful like that.
1110 Revisit when we have a machine model to work with and not before. */
1112 if (targetm.sched.adjust_priority)
1113 INSN_PRIORITY (prev) =
1114 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1117 /* Advance time on one cycle. */
1118 HAIFA_INLINE static void
1119 advance_one_cycle (void)
1121 if (targetm.sched.dfa_pre_cycle_insn)
1122 state_transition (curr_state,
1123 targetm.sched.dfa_pre_cycle_insn ());
1125 state_transition (curr_state, NULL);
1127 if (targetm.sched.dfa_post_cycle_insn)
1128 state_transition (curr_state,
1129 targetm.sched.dfa_post_cycle_insn ());
1132 /* Clock at which the previous instruction was issued. */
1133 static int last_clock_var;
1135 /* INSN is the "currently executing insn". Launch each insn which was
1136 waiting on INSN. The function returns the necessary cycle advance
1137 after issuing the insn (it is not zero for insns in a schedule
1138 group). */
1141 static int
1142 schedule_insn (rtx insn)
1144 rtx link;
1145 int advance = 0;
1147 if (sched_verbose >= 1)
1149 char buf[2048];
1151 print_insn (buf, insn, 0);
1152 buf[40] = 0;
1153 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1155 if (recog_memoized (insn) < 0)
1156 fprintf (sched_dump, "nothing");
1157 else
1158 print_reservation (sched_dump, insn);
1159 fputc ('\n', sched_dump);
1162 /* The instruction being scheduled should have all its dependencies
1163 resolved and should have been removed from the ready list. */
1164 gcc_assert (INSN_DEP_COUNT (insn) == 0);
1165 gcc_assert (!LOG_LINKS (insn));
1166 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1168 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1170 /* Now we can free RESOLVED_DEPS list. */
1171 if (current_sched_info->flags & USE_DEPS_LIST)
1172 free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
1173 else
1174 free_INSN_LIST_list (&RESOLVED_DEPS (insn));
1176 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1177 if (INSN_TICK (insn) > clock_var)
1178 /* INSN has been prematurely moved from the queue to the ready list.
1179 This is possible only if following flag is set. */
1180 gcc_assert (flag_sched_stalled_insns);
1182 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1183 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1184 INSN_TICK (insn) = clock_var;
1186 /* Update dependent instructions. */
1187 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1189 rtx next = XEXP (link, 0);
1191 resolve_dep (next, insn);
1193 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1195 int effective_cost;
1197 effective_cost = try_ready (next);
1199 if (effective_cost >= 0
1200 && SCHED_GROUP_P (next)
1201 && advance < effective_cost)
1202 advance = effective_cost;
1204 else
1205 /* A check always has only one forward dependence (to the first insn in
1206 the recovery block), therefore this will be executed only once. */
1208 gcc_assert (XEXP (link, 1) == 0);
1209 fix_recovery_deps (RECOVERY_BLOCK (insn));
1213 /* Annotate the instruction with issue information -- TImode
1214 indicates that the instruction is expected not to be able
1215 to issue on the same cycle as the previous insn. A machine
1216 may use this information to decide how the instruction should
1217 be aligned. */
1218 if (issue_rate > 1
1219 && GET_CODE (PATTERN (insn)) != USE
1220 && GET_CODE (PATTERN (insn)) != CLOBBER)
1222 if (reload_completed)
1223 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1224 last_clock_var = clock_var;
1227 return advance;
1230 /* Functions for handling of notes. */
1232 /* Delete notes beginning with INSN and put them in the chain
1233 of notes ended by NOTE_LIST.
1234 Returns the insn following the notes. */
1236 static rtx
1237 unlink_other_notes (rtx insn, rtx tail)
1239 rtx prev = PREV_INSN (insn);
1241 while (insn != tail && NOTE_NOT_BB_P (insn))
1243 rtx next = NEXT_INSN (insn);
1244 basic_block bb = BLOCK_FOR_INSN (insn);
1246 /* Delete the note from its current position. */
1247 if (prev)
1248 NEXT_INSN (prev) = next;
1249 if (next)
1250 PREV_INSN (next) = prev;
1252 if (bb)
1254 /* Basic block can begin with either LABEL or
1255 NOTE_INSN_BASIC_BLOCK. */
1256 gcc_assert (BB_HEAD (bb) != insn);
1258 /* Check if we are removing last insn in the BB. */
1259 if (BB_END (bb) == insn)
1260 BB_END (bb) = prev;
1263 /* See sched_analyze to see how these are handled. */
1264 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1265 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
1267 /* Insert the note at the end of the notes list. */
1268 PREV_INSN (insn) = note_list;
1269 if (note_list)
1270 NEXT_INSN (note_list) = insn;
1271 note_list = insn;
1274 insn = next;
1276 return insn;
1279 /* Delete line notes beginning with INSN. Record line-number notes so
1280 they can be reused. Returns the insn following the notes. */
1282 static rtx
1283 unlink_line_notes (rtx insn, rtx tail)
1285 rtx prev = PREV_INSN (insn);
1287 while (insn != tail && NOTE_NOT_BB_P (insn))
1289 rtx next = NEXT_INSN (insn);
1291 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
1293 basic_block bb = BLOCK_FOR_INSN (insn);
1295 /* Delete the note from its current position. */
1296 if (prev)
1297 NEXT_INSN (prev) = next;
1298 if (next)
1299 PREV_INSN (next) = prev;
1301 if (bb)
1303 /* Basic block can begin with either LABEL or
1304 NOTE_INSN_BASIC_BLOCK. */
1305 gcc_assert (BB_HEAD (bb) != insn);
1307 /* Check if we are removing last insn in the BB. */
1308 if (BB_END (bb) == insn)
1309 BB_END (bb) = prev;
1312 /* Record line-number notes so they can be reused. */
1313 LINE_NOTE (insn) = insn;
1315 else
1316 prev = insn;
1318 insn = next;
1320 return insn;
1323 /* Return the head and tail pointers of ebb starting at BEG and ending
1324 at END. */
1326 void
1327 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1329 rtx beg_head = BB_HEAD (beg);
1330 rtx beg_tail = BB_END (beg);
1331 rtx end_head = BB_HEAD (end);
1332 rtx end_tail = BB_END (end);
1334 /* Don't include any notes or labels at the beginning of the BEG
1335 basic block, or notes at the end of the END basic block. */
1337 if (LABEL_P (beg_head))
1338 beg_head = NEXT_INSN (beg_head);
1340 while (beg_head != beg_tail)
1341 if (NOTE_P (beg_head))
1342 beg_head = NEXT_INSN (beg_head);
1343 else
1344 break;
1346 *headp = beg_head;
1348 if (beg == end)
1349 end_head = beg_head;
1350 else if (LABEL_P (end_head))
1351 end_head = NEXT_INSN (end_head);
1353 while (end_head != end_tail)
1354 if (NOTE_P (end_tail))
1355 end_tail = PREV_INSN (end_tail);
1356 else
1357 break;
1359 *tailp = end_tail;
1362 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1364 int
1365 no_real_insns_p (rtx head, rtx tail)
1367 while (head != NEXT_INSN (tail))
1369 if (!NOTE_P (head) && !LABEL_P (head))
1370 return 0;
1371 head = NEXT_INSN (head);
1373 return 1;
1376 /* Delete line notes from one block. Save them so they can be later restored
1377 (in restore_line_notes). HEAD and TAIL are the boundaries of the
1378 block in which notes should be processed. */
1380 void
1381 rm_line_notes (rtx head, rtx tail)
1383 rtx next_tail;
1384 rtx insn;
1386 next_tail = NEXT_INSN (tail);
1387 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1389 rtx prev;
1391 /* Farm out notes, and maybe save them in NOTE_LIST.
1392 This is needed to keep the debugger from
1393 getting completely deranged. */
1394 if (NOTE_NOT_BB_P (insn))
1396 prev = insn;
1397 insn = unlink_line_notes (insn, next_tail);
1399 gcc_assert (prev != tail && prev != head && insn != next_tail);
1404 /* Save line number notes for each insn in block B. HEAD and TAIL are
1405 the boundaries of the block in which notes should be processed. */
1407 void
1408 save_line_notes (int b, rtx head, rtx tail)
1410 rtx next_tail;
1412 /* We must use the true line number for the first insn in the block
1413 that was computed and saved at the start of this pass. We can't
1414 use the current line number, because scheduling of the previous
1415 block may have changed the current line number. */
1417 rtx line = line_note_head[b];
1418 rtx insn;
1420 next_tail = NEXT_INSN (tail);
1422 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1423 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1424 line = insn;
1425 else
1426 LINE_NOTE (insn) = line;
1429 /* After a block was scheduled, insert line notes into the insns list.
1430 HEAD and TAIL are the boundaries of the block in which notes should
1431 be processed. */
1433 void
1434 restore_line_notes (rtx head, rtx tail)
1436 rtx line, note, prev, new;
1437 int added_notes = 0;
1438 rtx next_tail, insn;
1440 head = head; /* Silence an unused-parameter warning. */
1441 next_tail = NEXT_INSN (tail);
1443 /* Determine the current line-number. We want to know the current
1444 line number of the first insn of the block here, in case it is
1445 different from the true line number that was saved earlier. If
1446 different, then we need a line number note before the first insn
1447 of this block. If it happens to be the same, then we don't want to
1448 emit another line number note here. */
1449 for (line = head; line; line = PREV_INSN (line))
1450 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
1451 break;
1453 /* Walk the insns keeping track of the current line-number and inserting
1454 the line-number notes as needed. */
1455 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1456 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1457 line = insn;
1458 /* This used to emit line number notes before every non-deleted note.
1459 However, this confuses a debugger, because line notes not separated
1460 by real instructions all end up at the same address. I can find no
1461 use for line number notes before other notes, so none are emitted. */
1462 else if (!NOTE_P (insn)
1463 && INSN_UID (insn) < old_max_uid
1464 && (note = LINE_NOTE (insn)) != 0
1465 && note != line
1466 && (line == 0
1467 #ifdef USE_MAPPED_LOCATION
1468 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
1469 #else
1470 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
1471 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
1472 #endif
1475 line = note;
1476 prev = PREV_INSN (insn);
1477 if (LINE_NOTE (note))
1479 /* Re-use the original line-number note. */
1480 LINE_NOTE (note) = 0;
1481 PREV_INSN (note) = prev;
1482 NEXT_INSN (prev) = note;
1483 PREV_INSN (insn) = note;
1484 NEXT_INSN (note) = insn;
1485 set_block_for_insn (note, BLOCK_FOR_INSN (insn));
1487 else
1489 added_notes++;
1490 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
1491 #ifndef USE_MAPPED_LOCATION
1492 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
1493 #endif
1496 if (sched_verbose && added_notes)
1497 fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
1500 /* After scheduling the function, delete redundant line notes from the
1501 insns list. */
1503 void
1504 rm_redundant_line_notes (void)
1506 rtx line = 0;
1507 rtx insn = get_insns ();
1508 int active_insn = 0;
1509 int notes = 0;
1511 /* Walk the insns deleting redundant line-number notes. Many of these
1512 are already present. The remainder tend to occur at basic
1513 block boundaries. */
1514 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1515 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1517 /* If there are no active insns following, INSN is redundant. */
1518 if (active_insn == 0)
1520 notes++;
1521 SET_INSN_DELETED (insn);
1523 /* If the line number is unchanged, LINE is redundant. */
1524 else if (line
1525 #ifdef USE_MAPPED_LOCATION
1526 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
1527 #else
1528 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
1529 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
1530 #endif
1533 notes++;
1534 SET_INSN_DELETED (line);
1535 line = insn;
1537 else
1538 line = insn;
1539 active_insn = 0;
1541 else if (!((NOTE_P (insn)
1542 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
1543 || (NONJUMP_INSN_P (insn)
1544 && (GET_CODE (PATTERN (insn)) == USE
1545 || GET_CODE (PATTERN (insn)) == CLOBBER))))
1546 active_insn++;
1548 if (sched_verbose && notes)
1549 fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
1552 /* Delete notes between HEAD and TAIL and put them in the chain
1553 of notes ended by NOTE_LIST. */
1555 void
1556 rm_other_notes (rtx head, rtx tail)
1558 rtx next_tail;
1559 rtx insn;
1561 note_list = 0;
1562 if (head == tail && (! INSN_P (head)))
1563 return;
1565 next_tail = NEXT_INSN (tail);
1566 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1568 rtx prev;
1570 /* Farm out notes, and maybe save them in NOTE_LIST.
1571 This is needed to keep the debugger from
1572 getting completely deranged. */
1573 if (NOTE_NOT_BB_P (insn))
1575 prev = insn;
1577 insn = unlink_other_notes (insn, next_tail);
1579 gcc_assert (prev != tail && prev != head && insn != next_tail);
1584 /* Functions for computation of registers live/usage info. */
1586 /* This function looks for a new register being defined.
1587 If the destination register is already used by the source,
1588 a new register is not needed. */
1590 static int
1591 find_set_reg_weight (rtx x)
1593 if (GET_CODE (x) == CLOBBER
1594 && register_operand (SET_DEST (x), VOIDmode))
1595 return 1;
1596 if (GET_CODE (x) == SET
1597 && register_operand (SET_DEST (x), VOIDmode))
1599 if (REG_P (SET_DEST (x)))
1601 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1602 return 1;
1603 else
1604 return 0;
1606 return 1;
1608 return 0;
1611 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1613 static void
1614 find_insn_reg_weight (basic_block bb)
1616 rtx insn, next_tail, head, tail;
1618 get_ebb_head_tail (bb, bb, &head, &tail);
1619 next_tail = NEXT_INSN (tail);
1621 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1622 find_insn_reg_weight1 (insn);
1625 /* Calculate INSN_REG_WEIGHT for a single instruction.
1626 Separated from find_insn_reg_weight because of the need
1627 to initialize new instructions in generate_recovery_code. */
1628 static void
1629 find_insn_reg_weight1 (rtx insn)
1631 int reg_weight = 0;
1632 rtx x;
1634 /* Handle register life information. */
1635 if (! INSN_P (insn))
1636 return;
1638 /* Increment weight for each register born here. */
1639 x = PATTERN (insn);
1640 reg_weight += find_set_reg_weight (x);
1641 if (GET_CODE (x) == PARALLEL)
1643 int j;
1644 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1646 x = XVECEXP (PATTERN (insn), 0, j);
1647 reg_weight += find_set_reg_weight (x);
1650 /* Decrement weight for each register that dies here. */
1651 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1653 if (REG_NOTE_KIND (x) == REG_DEAD
1654 || REG_NOTE_KIND (x) == REG_UNUSED)
1655 reg_weight--;
1658 INSN_REG_WEIGHT (insn) = reg_weight;
1661 /* Move insns that became ready to fire from the queue to the ready list. */
1663 static void
1664 queue_to_ready (struct ready_list *ready)
1666 rtx insn;
1667 rtx link;
1669 q_ptr = NEXT_Q (q_ptr);
1671 /* Add all pending insns that can be scheduled without stalls to the
1672 ready list. */
1673 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1675 insn = XEXP (link, 0);
1676 q_size -= 1;
1678 if (sched_verbose >= 2)
1679 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1680 (*current_sched_info->print_insn) (insn, 0));
1682 /* If the ready list is full, delay the insn for 1 cycle.
1683 See the comment in schedule_block for the rationale. */
1684 if (!reload_completed
1685 && ready->n_ready > MAX_SCHED_READY_INSNS
1686 && !SCHED_GROUP_P (insn))
1688 if (sched_verbose >= 2)
1689 fprintf (sched_dump, "requeued because ready full\n");
1690 queue_insn (insn, 1);
1692 else
1694 ready_add (ready, insn, false);
1695 if (sched_verbose >= 2)
1696 fprintf (sched_dump, "moving to ready without stalls\n");
1699 free_INSN_LIST_list (&insn_queue[q_ptr]);
1701 /* If there are no ready insns, stall until one is ready and add all
1702 of the pending insns at that point to the ready list. */
1703 if (ready->n_ready == 0)
1705 int stalls;
1707 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1709 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1711 for (; link; link = XEXP (link, 1))
1713 insn = XEXP (link, 0);
1714 q_size -= 1;
1716 if (sched_verbose >= 2)
1717 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1718 (*current_sched_info->print_insn) (insn, 0));
1720 ready_add (ready, insn, false);
1721 if (sched_verbose >= 2)
1722 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1724 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1726 advance_one_cycle ();
1728 break;
1731 advance_one_cycle ();
1734 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1735 clock_var += stalls;
1739 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1740 prematurely move INSN from the queue to the ready list. Currently,
1741 if a target defines the hook 'is_costly_dependence', this function
1742 uses the hook to check whether there exist any dependences which are
1743 considered costly by the target, between INSN and other insns that
1744 have already been scheduled. Dependences are checked up to Y cycles
1745 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
1746 controlling this value.
1747 (Other considerations could be taken into account instead, or in
1748 addition, depending on user flags and target hooks.) */
1750 static bool
1751 ok_for_early_queue_removal (rtx insn)
1753 int n_cycles;
1754 rtx prev_insn = last_scheduled_insn;
1756 if (targetm.sched.is_costly_dependence)
1758 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1760 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1762 rtx dep_link = 0;
1763 int dep_cost;
1765 if (!NOTE_P (prev_insn))
1767 dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
1768 if (dep_link)
1770 dep_cost = insn_cost (prev_insn, dep_link, insn) ;
1771 if (targetm.sched.is_costly_dependence (prev_insn, insn,
1772 dep_link, dep_cost,
1773 flag_sched_stalled_insns_dep - n_cycles))
1774 return false;
1778 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1779 break;
1782 if (!prev_insn)
1783 break;
1784 prev_insn = PREV_INSN (prev_insn);
1788 return true;
1792 /* Remove insns from the queue, before they become "ready" with respect
1793 to FU latency considerations. */
1795 static int
1796 early_queue_to_ready (state_t state, struct ready_list *ready)
1798 rtx insn;
1799 rtx link;
1800 rtx next_link;
1801 rtx prev_link;
1802 bool move_to_ready;
1803 int cost;
1804 state_t temp_state = alloca (dfa_state_size);
1805 int stalls;
1806 int insns_removed = 0;
1809 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1810 function:
1812 X == 0: There is no limit on how many queued insns can be removed
1813 prematurely. (flag_sched_stalled_insns = -1).
1815 X >= 1: Only X queued insns can be removed prematurely in each
1816 invocation. (flag_sched_stalled_insns = X).
1818 Otherwise: Early queue removal is disabled.
1819 (flag_sched_stalled_insns = 0)
1822 if (! flag_sched_stalled_insns)
1823 return 0;
1825 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1827 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1829 if (sched_verbose > 6)
1830 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1832 prev_link = 0;
1833 while (link)
1835 next_link = XEXP (link, 1);
1836 insn = XEXP (link, 0);
1837 if (insn && sched_verbose > 6)
1838 print_rtl_single (sched_dump, insn);
1840 memcpy (temp_state, state, dfa_state_size);
1841 if (recog_memoized (insn) < 0)
1842 /* Use a non-negative cost to mark the insn as not ready,
1843 avoiding an infinite Q->R->Q->R... cycle. */
1844 cost = 0;
1845 else
1846 cost = state_transition (temp_state, insn);
1848 if (sched_verbose >= 6)
1849 fprintf (sched_dump, "transition cost = %d\n", cost);
1851 move_to_ready = false;
1852 if (cost < 0)
1854 move_to_ready = ok_for_early_queue_removal (insn);
1855 if (move_to_ready == true)
1857 /* move from Q to R */
1858 q_size -= 1;
1859 ready_add (ready, insn, false);
1861 if (prev_link)
1862 XEXP (prev_link, 1) = next_link;
1863 else
1864 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1866 free_INSN_LIST_node (link);
1868 if (sched_verbose >= 2)
1869 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1870 (*current_sched_info->print_insn) (insn, 0));
1872 insns_removed++;
1873 if (insns_removed == flag_sched_stalled_insns)
1874 /* Remove no more than flag_sched_stalled_insns insns
1875 from Q at a time. */
1876 return insns_removed;
1880 if (move_to_ready == false)
1881 prev_link = link;
1883 link = next_link;
1884 } /* while link */
1885 } /* if link */
1887 } /* for stalls.. */
1889 return insns_removed;
1893 /* Print the ready list for debugging purposes. Callable from debugger. */
1895 static void
1896 debug_ready_list (struct ready_list *ready)
1898 rtx *p;
1899 int i;
1901 if (ready->n_ready == 0)
1903 fprintf (sched_dump, "\n");
1904 return;
1907 p = ready_lastpos (ready);
1908 for (i = 0; i < ready->n_ready; i++)
1909 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1910 fprintf (sched_dump, "\n");
1913 /* Search INSN for REG_SAVE_NOTE note pairs for
1914 NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
1915 NOTEs. The REG_SAVE_NOTE note following the first one contains the
1916 saved value for NOTE_BLOCK_NUMBER, which is useful for
1917 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1919 static void
1920 reemit_notes (rtx insn)
1922 rtx note, last = insn;
1924 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1926 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1928 enum insn_note note_type = INTVAL (XEXP (note, 0));
1930 last = emit_note_before (note_type, last);
1931 remove_note (insn, note);
1936 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1937 static void
1938 move_insn (rtx insn)
1940 rtx last = last_scheduled_insn;
1942 if (PREV_INSN (insn) != last)
1944 basic_block bb;
1945 rtx note;
1946 int jump_p = 0;
1948 bb = BLOCK_FOR_INSN (insn);
1950 /* BB_HEAD is either LABEL or NOTE. */
1951 gcc_assert (BB_HEAD (bb) != insn);
1953 if (BB_END (bb) == insn)
1954 /* If this is the last instruction in BB, move the end marker one
1955 instruction up. */
1957 /* Jumps are always placed at the end of basic block. */
1958 jump_p = control_flow_insn_p (insn);
1960 gcc_assert (!jump_p
1961 || ((current_sched_info->flags & SCHED_RGN)
1962 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1963 || (current_sched_info->flags & SCHED_EBB));
1965 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1967 BB_END (bb) = PREV_INSN (insn);
1970 gcc_assert (BB_END (bb) != last);
1972 if (jump_p)
1973 /* We move the block note along with the jump. */
1975 /* NT is needed for assertion below. */
1976 rtx nt = current_sched_info->next_tail;
1978 note = NEXT_INSN (insn);
1979 while (NOTE_NOT_BB_P (note) && note != nt)
1980 note = NEXT_INSN (note);
1982 if (note != nt
1983 && (LABEL_P (note)
1984 || BARRIER_P (note)))
1985 note = NEXT_INSN (note);
1987 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1989 else
1990 note = insn;
1992 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1993 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1995 NEXT_INSN (note) = NEXT_INSN (last);
1996 PREV_INSN (NEXT_INSN (last)) = note;
1998 NEXT_INSN (last) = insn;
1999 PREV_INSN (insn) = last;
2001 bb = BLOCK_FOR_INSN (last);
2003 if (jump_p)
2005 fix_jump_move (insn);
2007 if (BLOCK_FOR_INSN (insn) != bb)
2008 move_block_after_check (insn);
2010 gcc_assert (BB_END (bb) == last);
2013 set_block_for_insn (insn, bb);
2015 /* Update BB_END, if needed. */
2016 if (BB_END (bb) == last)
2017 BB_END (bb) = insn;
2020 reemit_notes (insn);
2022 SCHED_GROUP_P (insn) = 0;
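/* An illustrative sketch (not GCC code): at its core, move_insn above is a
   splice on a doubly linked list.  With a hypothetical node type, and
   assuming sentinel nodes so that neighbors are never null, unlinking NODE
   and reinserting it after AFTER takes the same pointer updates as the
   NEXT_INSN/PREV_INSN surgery above.  */

struct sketch_node
{
  struct sketch_node *prev, *next;
};

static void
sketch_move_after (struct sketch_node *node, struct sketch_node *after)
{
  /* Unlink NODE from its current position.  */
  node->prev->next = node->next;
  node->next->prev = node->prev;

  /* Reinsert NODE right after AFTER.  */
  node->next = after->next;
  after->next->prev = node;
  after->next = node;
  node->prev = after;
}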
2025 /* The following structure describes an entry of the stack of choices. */
2026 struct choice_entry
2028 /* Ordinal number of the issued insn in the ready queue. */
2029 int index;
2030 /* The number of remaining insns whose issue we should still try. */
2031 int rest;
2032 /* The number of issued essential insns. */
2033 int n;
2034 /* State after issuing the insn. */
2035 state_t state;
2038 /* The following array is used to implement a stack of choices used in
2039 function max_issue. */
2040 static struct choice_entry *choice_stack;
2042 /* The following variable holds the number of essential insns issued on
2043 the current cycle. An insn is essential if it changes the
2044 processor's state. */
2045 static int cycle_issued_insns;
2047 /* The following variable holds the maximal number of tries of issuing
2048 insns for the first cycle multipass insn scheduling. We define
2049 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2050 need this constraint if all real insns (with non-negative codes)
2051 had reservations, because in that case the algorithm complexity is
2052 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2053 might be incomplete and such insns might occur. For such
2054 descriptions, the complexity of the algorithm (without the constraint)
2055 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2056 static int max_lookahead_tries;
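/* For example (an illustrative calculation, not tied to any particular
   target): with the constant 100, DFA_LOOKAHEAD == 4 and ISSUE_RATE == 2,
   the cap computed in choose_ready below is 100 * 4 * 4 = 1600 tries.  */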
2058 /* The following value is the value of hook
2059 `first_cycle_multipass_dfa_lookahead' at the last call of
2060 `max_issue'. */
2061 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2063 /* The following value is the value of `issue_rate' at the last call of
2064 `sched_init'. */
2065 static int cached_issue_rate = 0;
2067 /* The following function returns the maximal (or close to maximal) number
2068 of insns which can be issued on the same cycle, one of which
2069 is the insn with the best rank (the first insn in READY). To
2070 do this, the function tries different samples of ready insns. READY
2071 is the current queue `ready'. The global array READY_TRY reflects which
2072 insns are already issued in this try. MAX_POINTS is the sum of points
2073 of all instructions in READY. The function stops immediately
2074 if it reaches a solution in which all instructions can be issued.
2075 INDEX will contain the index of the best insn in READY. The following
2076 function is used only for first cycle multipass scheduling. */
2077 static int
2078 max_issue (struct ready_list *ready, int *index, int max_points)
2080 int n, i, all, n_ready, best, delay, tries_num, points = -1;
2081 struct choice_entry *top;
2082 rtx insn;
2084 best = 0;
2085 memcpy (choice_stack->state, curr_state, dfa_state_size);
2086 top = choice_stack;
2087 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2088 top->n = 0;
2089 n_ready = ready->n_ready;
2090 for (all = i = 0; i < n_ready; i++)
2091 if (!ready_try [i])
2092 all++;
2093 i = 0;
2094 tries_num = 0;
2095 for (;;)
2097 if (top->rest == 0 || i >= n_ready)
2099 if (top == choice_stack)
2100 break;
2101 if (best < top - choice_stack && ready_try [0])
2103 best = top - choice_stack;
2104 *index = choice_stack [1].index;
2105 points = top->n;
2106 if (top->n == max_points || best == all)
2107 break;
2109 i = top->index;
2110 ready_try [i] = 0;
2111 top--;
2112 memcpy (curr_state, top->state, dfa_state_size);
2114 else if (!ready_try [i])
2116 tries_num++;
2117 if (tries_num > max_lookahead_tries)
2118 break;
2119 insn = ready_element (ready, i);
2120 delay = state_transition (curr_state, insn);
2121 if (delay < 0)
2123 if (state_dead_lock_p (curr_state))
2124 top->rest = 0;
2125 else
2126 top->rest--;
2127 n = top->n;
2128 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2129 n += ISSUE_POINTS (insn);
2130 top++;
2131 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2132 top->index = i;
2133 top->n = n;
2134 memcpy (top->state, curr_state, dfa_state_size);
2135 ready_try [i] = 1;
2136 i = -1;
2139 i++;
2141 while (top != choice_stack)
2143 ready_try [top->index] = 0;
2144 top--;
2146 memcpy (curr_state, choice_stack->state, dfa_state_size);
2148 if (sched_verbose >= 4)
2149 fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
2150 (*current_sched_info->print_insn) (ready_element (ready, *index),
2151 0),
2152 points, max_points);
2154 return best;
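/* An illustrative sketch (not GCC code): the search above can be pictured
   on a toy machine whose state is just a bitmask of busy functional units,
   each insn occupying one unit.  A bounded depth-first search tries the
   not-yet-chosen insns that still fit, gives up after a fixed number of
   tries, and keeps the deepest feasible set.  (On a real DFA the issue
   order matters, which is why max_issue keeps an explicit choice stack and
   copies of the automaton state.)  All names here are hypothetical.  */

#define SKETCH_MAX_TRIES 100

/* Try counter; it should be zeroed before each top-level call.  */
static int sketch_tries;

/* UNITS[I] is the unit bit insn I occupies; TAKEN marks chosen insns.
   Return the largest number of insns issuable together that was found
   within the try budget.  */
static int
sketch_max_issue (const unsigned *units, int n_ready,
		  unsigned busy, unsigned taken)
{
  int i, best = 0;

  for (i = 0; i < n_ready; i++)
    if (!(taken & (1u << i))
	&& !(busy & units[i])
	&& ++sketch_tries <= SKETCH_MAX_TRIES)
      {
	int depth = 1 + sketch_max_issue (units, n_ready,
					  busy | units[i],
					  taken | (1u << i));
	if (depth > best)
	  best = depth;
      }
  return best;
}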
2157 /* The following function chooses an insn from READY, removing it from
2158 the list, and returns it (or zero if the chosen speculative insn was
2159 requeued instead). It is used only for first cycle multipass scheduling. */
2161 static rtx
2162 choose_ready (struct ready_list *ready)
2164 int lookahead = 0;
2166 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2167 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2168 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2169 return ready_remove_first (ready);
2170 else
2172 /* Try to choose the better insn. */
2173 int index = 0, i, n;
2174 rtx insn;
2175 int more_issue, max_points, try_data = 1, try_control = 1;
2177 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2179 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2180 max_lookahead_tries = 100;
2181 for (i = 0; i < issue_rate; i++)
2182 max_lookahead_tries *= lookahead;
2184 insn = ready_element (ready, 0);
2185 if (INSN_CODE (insn) < 0)
2186 return ready_remove_first (ready);
2188 if (spec_info
2189 && spec_info->flags & (PREFER_NON_DATA_SPEC
2190 | PREFER_NON_CONTROL_SPEC))
2192 for (i = 0, n = ready->n_ready; i < n; i++)
2194 rtx x;
2195 ds_t s;
2197 x = ready_element (ready, i);
2198 s = TODO_SPEC (x);
2200 if (spec_info->flags & PREFER_NON_DATA_SPEC
2201 && !(s & DATA_SPEC))
2203 try_data = 0;
2204 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2205 || !try_control)
2206 break;
2209 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2210 && !(s & CONTROL_SPEC))
2212 try_control = 0;
2213 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2214 break;
2219 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2220 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2221 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2222 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2223 (insn)))
2224 /* Discard a speculative instruction that stands first in the ready
2225 list. */
2227 change_queue_index (insn, 1);
2228 return 0;
2231 max_points = ISSUE_POINTS (insn);
2232 more_issue = issue_rate - cycle_issued_insns - 1;
2234 for (i = 1; i < ready->n_ready; i++)
2236 insn = ready_element (ready, i);
2237 ready_try [i]
2238 = (INSN_CODE (insn) < 0
2239 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2240 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2241 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2242 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2243 (insn)));
2245 if (!ready_try [i] && more_issue-- > 0)
2246 max_points += ISSUE_POINTS (insn);
2249 if (max_issue (ready, &index, max_points) == 0)
2250 return ready_remove_first (ready);
2251 else
2252 return ready_remove (ready, index);
2256 /* Use forward list scheduling to rearrange insns of block pointed to by
2257 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2258 region. */
2260 void
2261 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2263 struct ready_list ready;
2264 int i, first_cycle_insn_p;
2265 int can_issue_more;
2266 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2267 int sort_p, advance, start_clock_var;
2269 /* Head/tail info for this block. */
2270 rtx prev_head = current_sched_info->prev_head;
2271 rtx next_tail = current_sched_info->next_tail;
2272 rtx head = NEXT_INSN (prev_head);
2273 rtx tail = PREV_INSN (next_tail);
2275 /* We used to have code to avoid getting parameters moved from hard
2276 argument registers into pseudos.
2278 However, it was removed when it proved to be of marginal benefit
2279 and caused problems because schedule_block and compute_forward_dependences
2280 had different notions of what the "head" insn was. */
2282 gcc_assert (head != tail || INSN_P (head));
2284 added_recovery_block_p = false;
2286 /* Debug info. */
2287 if (sched_verbose)
2288 dump_new_block_header (0, *target_bb, head, tail);
2290 state_reset (curr_state);
2292 /* Allocate the ready list. */
2293 readyp = &ready;
2294 ready.vec = NULL;
2295 ready_try = NULL;
2296 choice_stack = NULL;
2298 rgn_n_insns = -1;
2299 extend_ready (rgn_n_insns1 + 1);
2301 ready.first = ready.veclen - 1;
2302 ready.n_ready = 0;
2304 /* It is used for first cycle multipass scheduling. */
2305 temp_state = alloca (dfa_state_size);
2307 if (targetm.sched.md_init)
2308 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2310 /* We start inserting insns after PREV_HEAD. */
2311 last_scheduled_insn = prev_head;
2313 gcc_assert (NOTE_P (last_scheduled_insn)
2314 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2316 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2317 queue. */
2318 q_ptr = 0;
2319 q_size = 0;
2321 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2322 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2324 /* Start just before the beginning of time. */
2325 clock_var = -1;
2327 /* We need the queue and ready lists and clock_var to be initialized
2328 in try_ready () (which is called through init_ready_list ()). */
2329 (*current_sched_info->init_ready_list) ();
2331 /* The algorithm is O(n^2) in the number of ready insns at any given
2332 time in the worst case. Before reload we are more likely to have
2333 big lists so truncate them to a reasonable size. */
2334 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2336 ready_sort (&ready);
2338 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2339 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2340 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2341 break;
2343 if (sched_verbose >= 2)
2345 fprintf (sched_dump,
2346 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2347 fprintf (sched_dump,
2348 ";;\t\t before reload => truncated to %d insns\n", i);
2351 /* Delay all insns past it for 1 cycle. */
2352 while (i < ready.n_ready)
2353 queue_insn (ready_remove (&ready, i), 1);
2356 /* Now we can restore basic block notes and maintain precise cfg. */
2357 restore_bb_notes (*target_bb);
2359 last_clock_var = -1;
2361 advance = 0;
2363 sort_p = TRUE;
2364 /* Loop until all the insns in BB are scheduled. */
2365 while ((*current_sched_info->schedule_more_p) ())
2369 start_clock_var = clock_var;
2371 clock_var++;
2373 advance_one_cycle ();
2375 /* Add to the ready list all pending insns that can be issued now.
2376 If there are no ready insns, increment clock until one
2377 is ready and add all pending insns at that point to the ready
2378 list. */
2379 queue_to_ready (&ready);
2381 gcc_assert (ready.n_ready);
2383 if (sched_verbose >= 2)
2385 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2386 debug_ready_list (&ready);
2388 advance -= clock_var - start_clock_var;
2390 while (advance > 0);
2392 if (sort_p)
2394 /* Sort the ready list based on priority. */
2395 ready_sort (&ready);
2397 if (sched_verbose >= 2)
2399 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2400 debug_ready_list (&ready);
2404 /* Allow the target to reorder the list, typically for
2405 better instruction bundling. */
2406 if (sort_p && targetm.sched.reorder
2407 && (ready.n_ready == 0
2408 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2409 can_issue_more =
2410 targetm.sched.reorder (sched_dump, sched_verbose,
2411 ready_lastpos (&ready),
2412 &ready.n_ready, clock_var);
2413 else
2414 can_issue_more = issue_rate;
2416 first_cycle_insn_p = 1;
2417 cycle_issued_insns = 0;
2418 for (;;)
2420 rtx insn;
2421 int cost;
2422 bool asm_p = false;
2424 if (sched_verbose >= 2)
2426 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2427 clock_var);
2428 debug_ready_list (&ready);
2431 if (ready.n_ready == 0
2432 && can_issue_more
2433 && reload_completed)
2435 /* Allow scheduling insns directly from the queue in case
2436 there's nothing better to do (ready list is empty) but
2437 there are still vacant dispatch slots in the current cycle. */
2438 if (sched_verbose >= 6)
2439 fprintf (sched_dump, ";;\t\tSecond chance\n");
2440 memcpy (temp_state, curr_state, dfa_state_size);
2441 if (early_queue_to_ready (temp_state, &ready))
2442 ready_sort (&ready);
2445 if (ready.n_ready == 0 || !can_issue_more
2446 || state_dead_lock_p (curr_state)
2447 || !(*current_sched_info->schedule_more_p) ())
2448 break;
2450 /* Select and remove the insn from the ready list. */
2451 if (sort_p)
2453 insn = choose_ready (&ready);
2454 if (!insn)
2455 continue;
2457 else
2458 insn = ready_remove_first (&ready);
2460 if (targetm.sched.dfa_new_cycle
2461 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2462 insn, last_clock_var,
2463 clock_var, &sort_p))
2464 /* SORT_P is used by the target to override sorting
2465 of the ready list. This is needed when the target
2466 has modified its internal structures expecting that
2467 the insn will be issued next. As we need the insn
2468 to have the highest priority (so it will be returned by
2469 the ready_remove_first call above), we invoke
2470 ready_add (&ready, insn, true).
2471 But, still, there is one issue: INSN can be later
2472 discarded by scheduler's front end through
2473 current_sched_info->can_schedule_ready_p, hence, won't
2474 be issued next. */
2476 ready_add (&ready, insn, true);
2477 break;
2480 sort_p = TRUE;
2481 memcpy (temp_state, curr_state, dfa_state_size);
2482 if (recog_memoized (insn) < 0)
2484 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2485 || asm_noperands (PATTERN (insn)) >= 0);
2486 if (!first_cycle_insn_p && asm_p)
2487 /* This is an asm insn that we tried to issue on a cycle
2488 other than the first. Issue it on the next cycle. */
2489 cost = 1;
2490 else
2491 /* A USE insn, or something else we don't need to
2492 understand. We can't pass these directly to
2493 state_transition because it will trigger a
2494 fatal error for unrecognizable insns. */
2495 cost = 0;
2497 else
2499 cost = state_transition (temp_state, insn);
2500 if (cost < 0)
2501 cost = 0;
2502 else if (cost == 0)
2503 cost = 1;
2506 if (cost >= 1)
2508 queue_insn (insn, cost);
2509 if (SCHED_GROUP_P (insn))
2511 advance = cost;
2512 break;
2515 continue;
2518 if (current_sched_info->can_schedule_ready_p
2519 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2520 /* We normally get here only if we don't want to move
2521 insn from the split block. */
2523 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2524 continue;
2527 /* DECISION is made. */
2529 if (TODO_SPEC (insn) & SPECULATIVE)
2530 generate_recovery_code (insn);
2532 if (control_flow_insn_p (last_scheduled_insn)
2533 /* This is used to switch basic blocks by request
2534 from the scheduler front-end (actually, sched-ebb.c only).
2535 It handles blocks with a single fallthru
2536 edge: if the succeeding block has a jump, that jump would try to
2537 move to the end of the current bb, thus corrupting the CFG. */
2538 || current_sched_info->advance_target_bb (*target_bb, insn))
2540 *target_bb = current_sched_info->advance_target_bb
2541 (*target_bb, 0);
2543 if (sched_verbose)
2545 rtx x;
2547 x = next_real_insn (last_scheduled_insn);
2548 gcc_assert (x);
2549 dump_new_block_header (1, *target_bb, x, tail);
2552 last_scheduled_insn = bb_note (*target_bb);
2555 /* Update counters, etc in the scheduler's front end. */
2556 (*current_sched_info->begin_schedule_ready) (insn,
2557 last_scheduled_insn);
2559 move_insn (insn);
2560 last_scheduled_insn = insn;
2562 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2564 cycle_issued_insns++;
2565 memcpy (curr_state, temp_state, dfa_state_size);
2568 if (targetm.sched.variable_issue)
2569 can_issue_more =
2570 targetm.sched.variable_issue (sched_dump, sched_verbose,
2571 insn, can_issue_more);
2572 /* A naked CLOBBER or USE generates no instruction, so do
2573 not count them against the issue rate. */
2574 else if (GET_CODE (PATTERN (insn)) != USE
2575 && GET_CODE (PATTERN (insn)) != CLOBBER)
2576 can_issue_more--;
2578 advance = schedule_insn (insn);
2580 /* After issuing an asm insn we should start a new cycle. */
2581 if (advance == 0 && asm_p)
2582 advance = 1;
2583 if (advance != 0)
2584 break;
2586 first_cycle_insn_p = 0;
2588 /* Sort the ready list based on priority. This must be
2589 redone here, as schedule_insn may have readied additional
2590 insns that will not be sorted correctly. */
2591 if (ready.n_ready > 0)
2592 ready_sort (&ready);
2594 if (targetm.sched.reorder2
2595 && (ready.n_ready == 0
2596 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2598 can_issue_more =
2599 targetm.sched.reorder2 (sched_dump, sched_verbose,
2600 ready.n_ready
2601 ? ready_lastpos (&ready) : NULL,
2602 &ready.n_ready, clock_var);
2607 /* Debug info. */
2608 if (sched_verbose)
2610 fprintf (sched_dump, ";;\tReady list (final): ");
2611 debug_ready_list (&ready);
2614 if (current_sched_info->queue_must_finish_empty)
2615 /* Sanity check -- queue must be empty now. Meaningless if region has
2616 multiple bbs. */
2617 gcc_assert (!q_size && !ready.n_ready);
2618 else
2620 /* We must maintain QUEUE_INDEX between blocks in region. */
2621 for (i = ready.n_ready - 1; i >= 0; i--)
2623 rtx x;
2625 x = ready_element (&ready, i);
2626 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2627 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2630 if (q_size)
2631 for (i = 0; i <= max_insn_queue_index; i++)
2633 rtx link;
2634 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2636 rtx x;
2638 x = XEXP (link, 0);
2639 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2640 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2642 free_INSN_LIST_list (&insn_queue[i]);
2646 if (!current_sched_info->queue_must_finish_empty
2647 || added_recovery_block_p)
2649 /* INSN_TICK (minimum clock tick at which the insn becomes
2650 ready) may not be correct for the insns in the subsequent
2651 blocks of the region. We should use a correct value of
2652 `clock_var' or modify INSN_TICK. It is better to keep
2653 clock_var equal to 0 at the start of a basic block.
2654 Therefore we modify INSN_TICK here. */
2655 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2658 #ifdef ENABLE_CHECKING
2659 /* After reload the ia64 backend doesn't maintain BB_END, so
2660 if we want to check anything, better do it now.
2661 And it has already clobbered previously scheduled code. */
2662 if (reload_completed)
2663 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
2664 #endif
2666 if (targetm.sched.md_finish)
2667 targetm.sched.md_finish (sched_dump, sched_verbose);
2669 /* Update head/tail boundaries. */
2670 head = NEXT_INSN (prev_head);
2671 tail = last_scheduled_insn;
2673 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2674 previously found among the insns. Insert them at the beginning
2675 of the insns. */
2676 if (note_list != 0)
2678 basic_block head_bb = BLOCK_FOR_INSN (head);
2679 rtx note_head = note_list;
2681 while (PREV_INSN (note_head))
2683 set_block_for_insn (note_head, head_bb);
2684 note_head = PREV_INSN (note_head);
2686 /* The loop above misses this note: */
2687 set_block_for_insn (note_head, head_bb);
2689 PREV_INSN (note_head) = PREV_INSN (head);
2690 NEXT_INSN (PREV_INSN (head)) = note_head;
2691 PREV_INSN (head) = note_list;
2692 NEXT_INSN (note_list) = head;
2693 head = note_head;
2696 /* Debugging. */
2697 if (sched_verbose)
2699 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2700 clock_var, INSN_UID (head));
2701 fprintf (sched_dump, ";; new tail = %d\n\n",
2702 INSN_UID (tail));
2705 current_sched_info->head = head;
2706 current_sched_info->tail = tail;
2708 free (ready.vec);
2710 free (ready_try);
2711 for (i = 0; i <= rgn_n_insns; i++)
2712 free (choice_stack [i].state);
2713 free (choice_stack);
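/* An illustrative sketch (not GCC code): stripped of all bookkeeping,
   recovery code and target hooks, the loop above is plain list scheduling.
   This toy version (hypothetical names, at most 32 "insns", ready ticks
   precomputed instead of being derived from dependences) issues up to
   SKETCH_ISSUE_RATE insns per cycle, always preferring higher priority,
   and returns the number of cycles used.  */

#define SKETCH_ISSUE_RATE 2

static int
sketch_schedule (const int *tick, const int *prio, int n_insns)
{
  int scheduled = 0, clock = -1;
  unsigned done = 0;

  while (scheduled < n_insns)
    {
      int slots = SKETCH_ISSUE_RATE;

      clock++;
      while (slots > 0)
	{
	  int i, best = -1;

	  /* Pick the highest-priority insn that is ready and not done.  */
	  for (i = 0; i < n_insns; i++)
	    if (!(done & (1u << i)) && tick[i] <= clock
		&& (best < 0 || prio[i] > prio[best]))
	      best = i;
	  if (best < 0)
	    break;		/* Nothing ready; advance the clock.  */

	  done |= 1u << best;
	  scheduled++;
	  slots--;
	}
    }
  return clock + 1;
}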
2716 /* Set_priorities: compute priority of each insn in the block. */
2718 int
2719 set_priorities (rtx head, rtx tail)
2721 rtx insn;
2722 int n_insn;
2723 int sched_max_insns_priority =
2724 current_sched_info->sched_max_insns_priority;
2725 rtx prev_head;
2727 if (head == tail && (! INSN_P (head)))
2728 return 0;
2730 n_insn = 0;
2732 prev_head = PREV_INSN (head);
2733 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2735 if (!INSN_P (insn))
2736 continue;
2738 n_insn++;
2739 (void) priority (insn);
2741 if (INSN_PRIORITY_KNOWN (insn))
2742 sched_max_insns_priority =
2743 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2746 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2748 return n_insn;
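/* An illustrative sketch (not GCC code): the priority computed above is
   essentially the longest latency-weighted path from an insn to the end
   of the block.  Over a toy dependence DAG whose insns are numbered in a
   topological order, one backward pass suffices, mirroring the
   tail-to-head walk in set_priorities.  All names are hypothetical.  */

struct sketch_dep
{
  int consumer;		/* Index of the dependent insn.  */
  int cost;		/* Latency of the dependence.  */
};

/* DEPS[I]/N_DEPS[I] list the insns depending on insn I.  PRIO receives
   the longest-path priority of each insn.  */
static void
sketch_set_priorities (struct sketch_dep **deps, const int *n_deps,
		       int n_insns, int *prio)
{
  int i, j;

  for (i = n_insns - 1; i >= 0; i--)
    {
      prio[i] = 0;
      for (j = 0; j < n_deps[i]; j++)
	{
	  int p = deps[i][j].cost + prio[deps[i][j].consumer];
	  if (p > prio[i])
	    prio[i] = p;
	}
    }
}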
2751 /* Next LUID to assign to an instruction. */
2752 static int luid;
2754 /* Initialize some global state for the scheduler. */
2756 void
2757 sched_init (void)
2759 basic_block b;
2760 rtx insn;
2761 int i;
2763 /* Switch to working copy of sched_info. */
2764 memcpy (&current_sched_info_var, current_sched_info,
2765 sizeof (current_sched_info_var));
2766 current_sched_info = &current_sched_info_var;
2768 /* If cc0 is defined, disable speculative loads in its presence. */
2769 #ifdef HAVE_cc0
2770 flag_schedule_speculative_load = 0;
2771 #endif
2773 /* Set dump and sched_verbose for the desired debugging output. If no
2774 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2775 For -fsched-verbose=N, N>=10, print everything to stderr. */
2776 sched_verbose = sched_verbose_param;
2777 if (sched_verbose_param == 0 && dump_file)
2778 sched_verbose = 1;
2779 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2780 ? stderr : dump_file);
2782 /* Initialize SPEC_INFO. */
2783 if (targetm.sched.set_sched_flags)
2785 spec_info = &spec_info_var;
2786 targetm.sched.set_sched_flags (spec_info);
2787 if (current_sched_info->flags & DO_SPECULATION)
2788 spec_info->weakness_cutoff =
2789 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2790 else
2791 /* So we won't read anything accidentally. */
2792 spec_info = 0;
2793 #ifdef ENABLE_CHECKING
2794 check_sched_flags ();
2795 #endif
2797 else
2798 /* So we won't read anything accidentally. */
2799 spec_info = 0;
2801 /* Initialize issue_rate. */
2802 if (targetm.sched.issue_rate)
2803 issue_rate = targetm.sched.issue_rate ();
2804 else
2805 issue_rate = 1;
2807 if (cached_issue_rate != issue_rate)
2809 cached_issue_rate = issue_rate;
2810 /* To invalidate max_lookahead_tries: */
2811 cached_first_cycle_multipass_dfa_lookahead = 0;
2814 old_max_uid = 0;
2815 h_i_d = 0;
2816 extend_h_i_d ();
2818 for (i = 0; i < old_max_uid; i++)
2820 h_i_d[i].cost = -1;
2821 h_i_d[i].todo_spec = HARD_DEP;
2822 h_i_d[i].queue_index = QUEUE_NOWHERE;
2823 h_i_d[i].tick = INVALID_TICK;
2824 h_i_d[i].inter_tick = INVALID_TICK;
2827 if (targetm.sched.init_dfa_pre_cycle_insn)
2828 targetm.sched.init_dfa_pre_cycle_insn ();
2830 if (targetm.sched.init_dfa_post_cycle_insn)
2831 targetm.sched.init_dfa_post_cycle_insn ();
2833 dfa_start ();
2834 dfa_state_size = state_size ();
2835 curr_state = xmalloc (dfa_state_size);
2837 h_i_d[0].luid = 0;
2838 luid = 1;
2839 FOR_EACH_BB (b)
2840 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2842 INSN_LUID (insn) = luid;
2844 /* Increment the next luid, unless this is a note. We don't
2845 really need separate IDs for notes and we don't want to
2846 schedule differently depending on whether or not there are
2847 line-number notes, i.e., depending on whether or not we're
2848 generating debugging information. */
2849 if (!NOTE_P (insn))
2850 ++luid;
2852 if (insn == BB_END (b))
2853 break;
2856 init_dependency_caches (luid);
2858 init_alias_analysis ();
2860 line_note_head = 0;
2861 old_last_basic_block = 0;
2862 glat_start = 0;
2863 glat_end = 0;
2864 extend_bb (0);
2866 if (current_sched_info->flags & USE_GLAT)
2867 init_glat ();
2869 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2870 removing death notes. */
2871 FOR_EACH_BB_REVERSE (b)
2872 find_insn_reg_weight (b);
2874 if (targetm.sched.md_init_global)
2875 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2877 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2878 before_recovery = 0;
2880 #ifdef ENABLE_CHECKING
2881 /* This is used preferably for finding bugs in check_cfg () itself. */
2882 check_cfg (0, 0);
2883 #endif
2886 /* Free global data used during insn scheduling. */
2888 void
2889 sched_finish (void)
2891 free (h_i_d);
2892 free (curr_state);
2893 dfa_finish ();
2894 free_dependency_caches ();
2895 end_alias_analysis ();
2896 free (line_note_head);
2897 free_glat ();
2899 if (targetm.sched.md_finish_global)
2900 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2902 if (spec_info && spec_info->dump)
2904 char c = reload_completed ? 'a' : 'b';
2906 fprintf (spec_info->dump,
2907 ";; %s:\n", current_function_name ());
2909 fprintf (spec_info->dump,
2910 ";; Procedure %cr-begin-data-spec motions == %d\n",
2911 c, nr_begin_data);
2912 fprintf (spec_info->dump,
2913 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2914 c, nr_be_in_data);
2915 fprintf (spec_info->dump,
2916 ";; Procedure %cr-begin-control-spec motions == %d\n",
2917 c, nr_begin_control);
2918 fprintf (spec_info->dump,
2919 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2920 c, nr_be_in_control);
2923 #ifdef ENABLE_CHECKING
2924 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2925 if (!reload_completed)
2926 check_cfg (0, 0);
2927 #endif
2929 current_sched_info = NULL;
2932 /* Fix INSN_TICKs of the instructions in the current block as well as
2933 INSN_TICKs of their dependents.
2934 HEAD and TAIL are the begin and the end of the current scheduled block. */
2935 static void
2936 fix_inter_tick (rtx head, rtx tail)
2938 /* Set of instructions with corrected INSN_TICK. */
2939 bitmap_head processed;
2940 int next_clock = clock_var + 1;
2942 bitmap_initialize (&processed, 0);
2944 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2945 the INSN_TICKs of dependent instructions, so that INSN_TICKs are
2946 consistent across different blocks. */
2947 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2949 if (INSN_P (head))
2951 int tick;
2952 rtx link;
2954 tick = INSN_TICK (head);
2955 gcc_assert (tick >= MIN_TICK);
2957 /* Fix INSN_TICK of instruction from just scheduled block. */
2958 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2960 bitmap_set_bit (&processed, INSN_LUID (head));
2961 tick -= next_clock;
2963 if (tick < MIN_TICK)
2964 tick = MIN_TICK;
2966 INSN_TICK (head) = tick;
2969 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2971 rtx next;
2973 next = XEXP (link, 0);
2974 tick = INSN_TICK (next);
2976 if (tick != INVALID_TICK
2977 /* If NEXT has its INSN_TICK calculated, fix it.
2978 If not - it will be properly calculated from
2979 scratch later in fix_tick_ready. */
2980 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2982 bitmap_set_bit (&processed, INSN_LUID (next));
2983 tick -= next_clock;
2985 if (tick < MIN_TICK)
2986 tick = MIN_TICK;
2988 if (tick > INTER_TICK (next))
2989 INTER_TICK (next) = tick;
2990 else
2991 tick = INTER_TICK (next);
2993 INSN_TICK (next) = tick;
2998 bitmap_clear (&processed);
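/* A worked example (numbers invented for illustration): suppose the block
   just scheduled finished at clock 7, so NEXT_CLOCK is 8.  An insn in a
   subsequent block with INSN_TICK == 10 is rebased to 10 - 8 = 2, i.e. it
   becomes ready two cycles into the next block's local time; anything
   rebased below MIN_TICK is clamped.  */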
3001 /* Check if NEXT is ready to be added to the ready or queue list.
3002 If "yes", add it to the proper list.
3003 Returns:
3004 -1 - is not ready yet,
3005 0 - added to the ready list,
3006 0 < N - queued for N cycles. */
3007 int
3008 try_ready (rtx next)
3010 ds_t old_ts, *ts;
3011 rtx link;
3013 ts = &TODO_SPEC (next);
3014 old_ts = *ts;
3016 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3017 && ((old_ts & HARD_DEP)
3018 || (old_ts & SPECULATIVE)));
3020 if (!(current_sched_info->flags & DO_SPECULATION))
3022 if (!LOG_LINKS (next))
3023 *ts &= ~HARD_DEP;
3025 else
3027 *ts &= ~SPECULATIVE & ~HARD_DEP;
3029 link = LOG_LINKS (next);
3030 if (link)
3032 /* LOG_LINKS are maintained sorted.
3033 So if DEP_STATUS of the first dep is SPECULATIVE,
3034 then all other deps are speculative too. */
3035 if (DEP_STATUS (link) & SPECULATIVE)
3037 /* Now we've got NEXT with speculative deps only.
3038 1. Look at the deps to see what we have to do.
3039 2. Check if we can do 'todo'. */
3040 *ts = DEP_STATUS (link) & SPECULATIVE;
3041 while ((link = XEXP (link, 1)))
3042 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3044 if (dep_weak (*ts) < spec_info->weakness_cutoff)
3045 /* Too few points. */
3046 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3048 else
3049 *ts |= HARD_DEP;
3053 if (*ts & HARD_DEP)
3054 gcc_assert (*ts == old_ts
3055 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3056 else if (current_sched_info->new_ready)
3057 *ts = current_sched_info->new_ready (next, *ts);
3059 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3060 have its original pattern or changed (speculative) one. This is due
3061 to changing ebb in region scheduling.
3062 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3063 has speculative pattern.
3065 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3066 control-speculative NEXT could have been discarded by sched-rgn.c
3067 (the same case as when discarded by can_schedule_ready_p ()). */
3069 if ((*ts & SPECULATIVE)
3070 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3071 need to change anything. */
3072 && *ts != old_ts)
3074 int res;
3075 rtx new_pat;
3077 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3079 res = speculate_insn (next, *ts, &new_pat);
3081 switch (res)
3083 case -1:
3084 /* It would be nice to change DEP_STATUS of all dependences,
3085 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3086 so we won't reanalyze anything. */
3087 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3088 break;
3090 case 0:
3091 /* We follow the rule that every speculative insn
3092 has a non-null ORIG_PAT. */
3093 if (!ORIG_PAT (next))
3094 ORIG_PAT (next) = PATTERN (next);
3095 break;
3097 case 1:
3098 if (!ORIG_PAT (next))
3099 /* If we are going to overwrite the original pattern of the insn,
3100 save it. */
3101 ORIG_PAT (next) = PATTERN (next);
3103 change_pattern (next, new_pat);
3104 break;
3106 default:
3107 gcc_unreachable ();
3111 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3112 either correct (*ts & SPECULATIVE),
3113 or we simply don't care (*ts & HARD_DEP). */
3115 gcc_assert (!ORIG_PAT (next)
3116 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3118 if (*ts & HARD_DEP)
3120 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3121 control-speculative NEXT could have been discarded by sched-rgn.c
3122 (the same case as when discarded by can_schedule_ready_p ()). */
3123 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3125 change_queue_index (next, QUEUE_NOWHERE);
3126 return -1;
3128 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3129 /* We should change the pattern of every previously speculative
3130 instruction; we determine whether NEXT was speculative by the
3131 ORIG_PAT field. There is one exception: speculation checks have
3132 ORIG_PAT set too, so skip them. */
3134 change_pattern (next, ORIG_PAT (next));
3135 ORIG_PAT (next) = 0;
3138 if (sched_verbose >= 2)
3140 int s = TODO_SPEC (next);
3142 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3143 (*current_sched_info->print_insn) (next, 0));
3145 if (spec_info && spec_info->dump)
3147 if (s & BEGIN_DATA)
3148 fprintf (spec_info->dump, "; data-spec;");
3149 if (s & BEGIN_CONTROL)
3150 fprintf (spec_info->dump, "; control-spec;");
3151 if (s & BE_IN_CONTROL)
3152 fprintf (spec_info->dump, "; in-control-spec;");
3155 fprintf (sched_dump, "\n");
3158 adjust_priority (next);
3160 return fix_tick_ready (next);
3163 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3164 static int
3165 fix_tick_ready (rtx next)
3167 rtx link;
3168 int tick, delay;
3170 link = RESOLVED_DEPS (next);
3172 if (link)
3174 int full_p;
3176 tick = INSN_TICK (next);
3177 /* if tick is not equal to INVALID_TICK, then update
3178 INSN_TICK of NEXT with the most recent resolved dependence
3179 cost. Otherwise, recalculate from scratch. */
3180 full_p = tick == INVALID_TICK;
3183 rtx pro;
3184 int tick1;
3186 pro = XEXP (link, 0);
3187 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3189 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3190 if (tick1 > tick)
3191 tick = tick1;
3193 while ((link = XEXP (link, 1)) && full_p);
3195 else
3196 tick = -1;
3198 INSN_TICK (next) = tick;
3200 delay = tick - clock_var;
3201 if (delay <= 0)
3202 delay = QUEUE_READY;
3204 change_queue_index (next, delay);
3206 return delay;
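/* An illustrative sketch (not GCC code): the full recalculation above is
   the classic "earliest start time" recurrence.  Given the ticks of the
   producers and the costs of the dependences (hypothetical arrays), the
   consumer's tick is their maximum sum, and the queue delay is measured
   from the current clock.  */

static int
sketch_earliest_delay (const int *prod_tick, const int *dep_cost,
		       int n_deps, int clock)
{
  int i, tick = 0;

  for (i = 0; i < n_deps; i++)
    if (prod_tick[i] + dep_cost[i] > tick)
      tick = prod_tick[i] + dep_cost[i];

  /* A delay of zero or less means "ready on the current cycle".  */
  return tick - clock;
}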
3209 /* Move NEXT to the proper queue list with (DELAY >= 1),
3210 or add it to the ready list (DELAY == QUEUE_READY),
3211 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
3212 static void
3213 change_queue_index (rtx next, int delay)
3215 int i = QUEUE_INDEX (next);
3217 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3218 && delay != 0);
3219 gcc_assert (i != QUEUE_SCHEDULED);
3221 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3222 || (delay < 0 && delay == i))
3223 /* We have nothing to do. */
3224 return;
3226 /* Remove NEXT from wherever it is now. */
3227 if (i == QUEUE_READY)
3228 ready_remove_insn (next);
3229 else if (i >= 0)
3230 queue_remove (next);
3232 /* Add it to the proper place. */
3233 if (delay == QUEUE_READY)
3234 ready_add (readyp, next, false);
3235 else if (delay >= 1)
3236 queue_insn (next, delay);
3238 if (sched_verbose >= 2)
3240 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3241 (*current_sched_info->print_insn) (next, 0));
3243 if (delay == QUEUE_READY)
3244 fprintf (sched_dump, " into ready\n");
3245 else if (delay >= 1)
3246 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3247 else
3248 fprintf (sched_dump, " removed from ready or queue lists\n");
3252 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3253 static void
3254 resolve_dep (rtx next, rtx insn)
3256 rtx dep;
3258 INSN_DEP_COUNT (next)--;
3260 dep = remove_list_elem (insn, &LOG_LINKS (next));
3261 XEXP (dep, 1) = RESOLVED_DEPS (next);
3262 RESOLVED_DEPS (next) = dep;
3264 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3265 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
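/* An illustrative sketch (not GCC code): resolve_dep above detaches one
   node from LOG_LINKS and pushes it onto RESOLVED_DEPS.  With a toy
   singly linked list keyed by an integer (all names hypothetical), the
   same move looks like this.  */

struct sketch_link
{
  int key;
  struct sketch_link *next;
};

/* Detach the node with KEY from *FROM and push it onto *TO.  The key is
   assumed to be present, just as resolve_dep assumes the dependence
   exists.  */
static void
sketch_resolve (int key, struct sketch_link **from, struct sketch_link **to)
{
  struct sketch_link **p, *node;

  for (p = from; (*p)->key != key; p = &(*p)->next)
    ;
  node = *p;
  *p = node->next;	/* Unlink from the source list.  */
  node->next = *to;	/* Push onto the destination list.  */
  *to = node;
}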
3268 /* Extend H_I_D data. */
3269 static void
3270 extend_h_i_d (void)
3272 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3273 pseudos which do not cross calls. */
3274 int new_max_uid = get_max_uid() + 1;
3276 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3277 old_max_uid = new_max_uid;
3279 if (targetm.sched.h_i_d_extended)
3280 targetm.sched.h_i_d_extended ();
3283 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3284 N_NEW_INSNS is the number of additional elements to allocate. */
3285 static void
3286 extend_ready (int n_new_insns)
3288 int i;
3290 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3291 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3293 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3294 rgn_n_insns + 1, sizeof (char));
3296 rgn_n_insns += n_new_insns;
3298 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3299 rgn_n_insns + 1);
3301 for (i = rgn_n_insns; n_new_insns--; i--)
3302 choice_stack[i].state = xmalloc (dfa_state_size);
3305 /* Extend global scheduler structures (those that live across calls to
3306 schedule_block) to include information about the just emitted INSN. */
3307 static void
3308 extend_global (rtx insn)
3310 gcc_assert (INSN_P (insn));
3311 /* These structures have scheduler scope. */
3312 extend_h_i_d ();
3313 init_h_i_d (insn);
3315 extend_dependency_caches (1, 0);
3318 /* Extends global and local scheduler structures to include information
3319 about just emitted INSN. */
3320 static void
3321 extend_all (rtx insn)
3323 extend_global (insn);
3325 /* These structures have block scope. */
3326 extend_ready (1);
3328 (*current_sched_info->add_remove_insn) (insn, 0);
3331 /* Initialize the h_i_d entry of the new INSN with default values.
3332 Values that are not explicitly initialized here hold zero. */
3333 static void
3334 init_h_i_d (rtx insn)
3336 INSN_LUID (insn) = luid++;
3337 INSN_COST (insn) = -1;
3338 TODO_SPEC (insn) = HARD_DEP;
3339 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3340 INSN_TICK (insn) = INVALID_TICK;
3341 INTER_TICK (insn) = INVALID_TICK;
3342 find_insn_reg_weight1 (insn);
3345 /* Generates recovery code for INSN. */
3346 static void
3347 generate_recovery_code (rtx insn)
3349 if (TODO_SPEC (insn) & BEGIN_SPEC)
3350 begin_speculative_block (insn);
3352 /* Here we have an insn with no dependencies on
3353 instructions other than CHECK_SPEC ones. */
3355 if (TODO_SPEC (insn) & BE_IN_SPEC)
3356 add_to_speculative_block (insn);
3359 /* Helper function.
3360 Tries to add speculative dependencies of type FS between instructions
3361 in LINK list and TWIN. */
3362 static void
3363 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3365 for (; link; link = XEXP (link, 1))
3367 ds_t ds;
3368 rtx consumer;
3370 consumer = XEXP (link, 0);
3372 ds = DEP_STATUS (link);
3374 if (/* If we want to create speculative dep. */
3375 fs
3376 /* And we can do that because this is a true dep. */
3377 && (ds & DEP_TYPES) == DEP_TRUE)
3379 gcc_assert (!(ds & BE_IN_SPEC));
3381 if (/* If this dep can be overcome with 'begin speculation'. */
3382 ds & BEGIN_SPEC)
3383 /* Then we have a choice: keep the dep 'begin speculative'
3384 or transform it into 'be in speculative'. */
3386 if (/* In try_ready we assert that if insn once became ready
3387 it can be removed from the ready (or queue) list only
3388 due to backend decision. Hence we can't let the
3389 probability of the speculative dep to decrease. */
3390 dep_weak (ds) <= dep_weak (fs))
3391 /* Transform it to be in speculative. */
3392 ds = (ds & ~BEGIN_SPEC) | fs;
3394 else
3395 /* Mark the dep as 'be in speculative'. */
3396 ds |= fs;
3399 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3403 /* Generates recovery code for BEGIN speculative INSN. */
3404 static void
3405 begin_speculative_block (rtx insn)
3407 if (TODO_SPEC (insn) & BEGIN_DATA)
3408 nr_begin_data++;
3409 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3410 nr_begin_control++;
3412 create_check_block_twin (insn, false);
3414 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3417 /* Generates recovery code for BE_IN speculative INSN. */
3418 static void
3419 add_to_speculative_block (rtx insn)
3421 ds_t ts;
3422 rtx link, twins = NULL;
3424 ts = TODO_SPEC (insn);
3425 gcc_assert (!(ts & ~BE_IN_SPEC));
3427 if (ts & BE_IN_DATA)
3428 nr_be_in_data++;
3429 if (ts & BE_IN_CONTROL)
3430 nr_be_in_control++;
3432 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3433 gcc_assert (!TODO_SPEC (insn));
3435 DONE_SPEC (insn) |= ts;
3437 /* First we convert all simple checks to branchy ones. */
3438 for (link = LOG_LINKS (insn); link;)
3440 rtx check;
3442 check = XEXP (link, 0);
3444 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3446 create_check_block_twin (check, true);
3447 link = LOG_LINKS (insn);
3449 else
3450 link = XEXP (link, 1);
3453 clear_priorities (insn);
3457 rtx link, check, twin;
3458 basic_block rec;
3460 link = LOG_LINKS (insn);
3461 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3462 && (DEP_STATUS (link) & BE_IN_SPEC)
3463 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3465 check = XEXP (link, 0);
3467 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3468 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3470 rec = BLOCK_FOR_INSN (check);
3472 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3473 extend_global (twin);
3475 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3477 if (sched_verbose && spec_info->dump)
3478 /* INSN_BB (insn) isn't determined for twin insns yet.
3479 So we can't use current_sched_info->print_insn. */
3480 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3481 INSN_UID (twin), rec->index);
3483 twins = alloc_INSN_LIST (twin, twins);
3485 /* Add dependences between TWIN and all appropriate
3486 instructions from REC. */
3489 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3493 link = XEXP (link, 1);
3494 if (link)
3496 check = XEXP (link, 0);
3497 if (BLOCK_FOR_INSN (check) == rec)
3498 break;
3500 else
3501 break;
3503 while (1);
3505 while (link);
3507 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3509 for (link = LOG_LINKS (insn); link;)
3511 check = XEXP (link, 0);
3513 if (BLOCK_FOR_INSN (check) == rec)
3515 delete_back_forw_dep (insn, check);
3516 link = LOG_LINKS (insn);
3518 else
3519 link = XEXP (link, 1);
3522 while (LOG_LINKS (insn));
3524 /* We can't add the dependence between insn and twin earlier because
3525 that would make twin appear in the INSN_DEPEND (insn). */
3526 while (twins)
3528 rtx twin;
3530 twin = XEXP (twins, 0);
3531 calc_priorities (twin);
3532 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3534 twin = XEXP (twins, 1);
3535 free_INSN_LIST_node (twins);
3536 twins = twin;
3540 /* Extend the array pointed to by P, filling the new part with zeros. */
3541 void *
3542 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3544 gcc_assert (new_nmemb >= old_nmemb);
3545 p = XRESIZEVAR (void, p, new_nmemb * size);
3546 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3547 return p;
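/* Example use of xrecalloc (illustrative only; the counts are made up):
   grow a per-insn array while preserving old entries and zeroing the new
   tail.  */

static void
sketch_xrecalloc_use (void)
{
  int *weights;

  /* Initial allocation: everything is the "new part", so all zeroed.  */
  weights = xrecalloc (NULL, 16, 0, sizeof (*weights));
  weights[0] = 42;

  /* Grow from 16 to 32 elements; weights[0..15] are preserved and
     weights[16..31] are guaranteed to be zero.  */
  weights = xrecalloc (weights, 32, 16, sizeof (*weights));

  free (weights);
}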
3550 /* Return the probability of speculation success for the speculation
3551 status DS. */
3552 static dw_t
3553 dep_weak (ds_t ds)
3555 ds_t res = 1, dt;
3556 int n = 0;
3558 dt = FIRST_SPEC_TYPE;
3561 if (ds & dt)
3563 res *= (ds_t) get_dep_weak (ds, dt);
3564 n++;
3567 if (dt == LAST_SPEC_TYPE)
3568 break;
3569 dt <<= SPEC_TYPE_SHIFT;
3571 while (1);
3573 gcc_assert (n);
3574 while (--n)
3575 res /= MAX_DEP_WEAK;
3577 if (res < MIN_DEP_WEAK)
3578 res = MIN_DEP_WEAK;
3580 gcc_assert (res <= MAX_DEP_WEAK);
3582 return (dw_t) res;
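/* A worked example (numbers invented): weaknesses are fixed-point
   probabilities scaled by MAX_DEP_WEAK.  If MAX_DEP_WEAK were 1000 and a
   status combined two speculation types of weakness 800 and 900, the
   combined success probability would be 800 * 900 / 1000 = 720, i.e. the
   product of 0.8 and 0.9 in fixed point.  */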
3585 /* Helper function.
3586 Find fallthru edge from PRED. */
3587 static edge
3588 find_fallthru_edge (basic_block pred)
3590 edge e;
3591 edge_iterator ei;
3592 basic_block succ;
3594 succ = pred->next_bb;
3595 gcc_assert (succ->prev_bb == pred);
3597 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3599 FOR_EACH_EDGE (e, ei, pred->succs)
3600 if (e->flags & EDGE_FALLTHRU)
3602 gcc_assert (e->dest == succ);
3603 return e;
3606 else
3608 FOR_EACH_EDGE (e, ei, succ->preds)
3609 if (e->flags & EDGE_FALLTHRU)
3611 gcc_assert (e->src == pred);
3612 return e;
3616 return NULL;
3619 /* Initialize BEFORE_RECOVERY variable. */
3620 static void
3621 init_before_recovery (void)
3623 basic_block last;
3624 edge e;
3626 last = EXIT_BLOCK_PTR->prev_bb;
3627 e = find_fallthru_edge (last);
3629 if (e)
3631 /* We create two basic blocks:
3632 1. A single-instruction block that is inserted right after E->SRC
3633 and jumps to
3634 2. An empty block placed right before EXIT_BLOCK.
3635 Recovery blocks will be emitted between these two blocks. */
3637 basic_block single, empty;
3638 rtx x, label;
3640 single = create_empty_bb (last);
3641 empty = create_empty_bb (single);
3643 single->count = last->count;
3644 empty->count = last->count;
3645 single->frequency = last->frequency;
3646 empty->frequency = last->frequency;
3647 BB_COPY_PARTITION (single, last);
3648 BB_COPY_PARTITION (empty, last);
3650 redirect_edge_succ (e, single);
3651 make_single_succ_edge (single, empty, 0);
3652 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3653 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3655 label = block_label (empty);
3656 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3657 JUMP_LABEL (x) = label;
3658 LABEL_NUSES (label)++;
3659 extend_global (x);
3661 emit_barrier_after (x);
3663 add_block (empty, 0);
3664 add_block (single, 0);
3666 before_recovery = single;
3668 if (sched_verbose >= 2 && spec_info->dump)
3669 fprintf (spec_info->dump,
3670 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3671 last->index, single->index, empty->index);
3673 else
3674 before_recovery = last;
3677 /* Returns new recovery block. */
3678 static basic_block
3679 create_recovery_block (void)
3681 rtx label;
3682 basic_block rec;
3684 added_recovery_block_p = true;
3686 if (!before_recovery)
3687 init_before_recovery ();
3689 label = gen_label_rtx ();
3690 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3691 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3693 rec = create_basic_block (label, label, before_recovery);
3694 emit_barrier_after (BB_END (rec));
3696 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3697 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3699 if (sched_verbose && spec_info->dump)
3700 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3701 rec->index);
3703 before_recovery = rec;
3705 return rec;
3708 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3709 INSN is a simple check that should be converted to a branchy one. */
3710 static void
3711 create_check_block_twin (rtx insn, bool mutate_p)
3713 basic_block rec;
3714 rtx label, check, twin, link;
3715 ds_t fs;
3717 gcc_assert (ORIG_PAT (insn)
3718 && (!mutate_p
3719 || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3720 && !(TODO_SPEC (insn) & SPECULATIVE))));
3722 /* Create recovery block. */
3723 if (mutate_p || targetm.sched.needs_block_p (insn))
3725 rec = create_recovery_block ();
3726 label = BB_HEAD (rec);
3728 else
3730 rec = EXIT_BLOCK_PTR;
3731 label = 0;
3734 /* Emit CHECK. */
3735 check = targetm.sched.gen_check (insn, label, mutate_p);
3737 if (rec != EXIT_BLOCK_PTR)
3739 /* To have mem_reg alive at the beginning of second_bb,
3740 we emit the check BEFORE insn, so that after splitting,
3741 insn will be at the beginning of second_bb, which will
3742 provide us with the correct life information. */
3743 check = emit_jump_insn_before (check, insn);
3744 JUMP_LABEL (check) = label;
3745 LABEL_NUSES (label)++;
3747 else
3748 check = emit_insn_before (check, insn);
3750 /* Extend data structures. */
3751 extend_all (check);
3752 RECOVERY_BLOCK (check) = rec;
3754 if (sched_verbose && spec_info->dump)
3755 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3756 (*current_sched_info->print_insn) (check, 0));
3758 gcc_assert (ORIG_PAT (insn));
3760 /* Initialize TWIN (twin is a duplicate of original instruction
3761 in the recovery block). */
3762 if (rec != EXIT_BLOCK_PTR)
3764 rtx link;
3766 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3767 if (DEP_STATUS (link) & DEP_OUTPUT)
3769 RESOLVED_DEPS (check) =
3770 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3771 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3774 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3775 extend_global (twin);
3777 if (sched_verbose && spec_info->dump)
3778 /* INSN_BB (insn) isn't determined for twin insns yet.
3779 So we can't use current_sched_info->print_insn. */
3780 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3781 INSN_UID (twin), rec->index);
3783 else
3785 ORIG_PAT (check) = ORIG_PAT (insn);
3786 HAS_INTERNAL_DEP (check) = 1;
3787 twin = check;
3788 /* ??? We probably should change all OUTPUT dependencies to
3789 (TRUE | OUTPUT). */
3792 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3794 if (rec != EXIT_BLOCK_PTR)
3795 /* In case of branchy check, fix CFG. */
3797 basic_block first_bb, second_bb;
3798 rtx jump;
3799 edge e;
3800 int edge_flags;
3802 first_bb = BLOCK_FOR_INSN (check);
3803 e = split_block (first_bb, check);
3804 /* split_block emits note if *check == BB_END. Probably it
3805 is better to rip that note off. */
3806 gcc_assert (e->src == first_bb);
3807 second_bb = e->dest;
3809 /* Fix the incoming edge. */
3810 /* ??? Which other flags should be specified? */
3811 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3812 /* Partition type is the same, if it is "unpartitioned". */
3813 edge_flags = EDGE_CROSSING;
3814 else
3815 edge_flags = 0;
3817 e = make_edge (first_bb, rec, edge_flags);
3819 add_block (second_bb, first_bb);
3821 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3822 label = block_label (second_bb);
3823 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3824 JUMP_LABEL (jump) = label;
3825 LABEL_NUSES (label)++;
3826 extend_global (jump);
3828 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3829 /* Partition type is the same, if it is "unpartitioned". */
3831 /* Rewritten from cfgrtl.c. */
3832 if (flag_reorder_blocks_and_partition
3833 && targetm.have_named_sections
3834 /*&& !any_condjump_p (jump)*/)
3835 /* any_condjump_p (jump) == false.
3836 We don't need the same note for the check because
3837 any_condjump_p (check) == true. */
3839 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3840 NULL_RTX,
3841 REG_NOTES (jump));
3843 edge_flags = EDGE_CROSSING;
3845 else
3846 edge_flags = 0;
3848 make_single_succ_edge (rec, second_bb, edge_flags);
3850 add_block (rec, EXIT_BLOCK_PTR);
3853 /* Move backward dependences from INSN to CHECK and
3854 move forward dependences from INSN to TWIN. */
3855 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3857 ds_t ds;
3859 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3860 check --TRUE--> producer ??? or ANTI ???
3861 twin --TRUE--> producer
3862 twin --ANTI--> check
3864 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3865 check --ANTI--> producer
3866 twin --ANTI--> producer
3867 twin --ANTI--> check
3869 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3870 check ~~TRUE~~> producer
3871 twin ~~TRUE~~> producer
3872 twin --ANTI--> check */
3874 ds = DEP_STATUS (link);
3876 if (ds & BEGIN_SPEC)
3878 gcc_assert (!mutate_p);
3879 ds &= ~BEGIN_SPEC;
3882 if (rec != EXIT_BLOCK_PTR)
3884 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3885 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3887 else
3888 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3891 for (link = LOG_LINKS (insn); link;)
3892 if ((DEP_STATUS (link) & BEGIN_SPEC)
3893 || mutate_p)
3894 /* We can delete this dep only if we totally overcome it with
3895 BEGIN_SPECULATION. */
3897 delete_back_forw_dep (insn, XEXP (link, 0));
3898 link = LOG_LINKS (insn);
3900 else
3901 link = XEXP (link, 1);
3903 fs = 0;
3905 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3906 here. */
3908 gcc_assert (!DONE_SPEC (insn));
3910 if (!mutate_p)
3912 ds_t ts = TODO_SPEC (insn);
3914 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3915 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3917 if (ts & BEGIN_DATA)
3918 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3919 if (ts & BEGIN_CONTROL)
3920 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3922 else
3923 CHECK_SPEC (check) = CHECK_SPEC (insn);
3925 /* Future speculations: call the helper. */
3926 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3928 if (rec != EXIT_BLOCK_PTR)
3930 /* Which types of dependencies to use here is, generally, a
3931 machine-dependent question... But, for now,
3932 it is not. */
3934 if (!mutate_p)
3936 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3937 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3939 else
3941 if (spec_info->dump)
3942 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3943 (*current_sched_info->print_insn) (insn, 0));
3945 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3946 delete_back_forw_dep (XEXP (link, 0), insn);
3948 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3949 try_ready (check);
3951 sched_remove_insn (insn);
3954 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3956 else
3957 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3959 if (!mutate_p)
3960 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3961 because it'll be done later in add_to_speculative_block. */
3963 clear_priorities (twin);
3964 calc_priorities (twin);
3968 /* Removes dependencies between instructions in the recovery block REC
3969 and ordinary region instructions. It keeps inner dependences so it
3970 won't be necessary to recompute them. */
3971 static void
3972 fix_recovery_deps (basic_block rec)
3974 rtx note, insn, link, jump, ready_list = 0;
3975 bitmap_head in_ready;
3977 bitmap_initialize (&in_ready, 0);
3979 /* NOTE - a basic block note. */
3980 note = NEXT_INSN (BB_HEAD (rec));
3981 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3982 insn = BB_END (rec);
3983 gcc_assert (JUMP_P (insn));
3984 insn = PREV_INSN (insn);
3988 for (link = INSN_DEPEND (insn); link;)
3990 rtx consumer;
3992 consumer = XEXP (link, 0);
3994 if (BLOCK_FOR_INSN (consumer) != rec)
3996 delete_back_forw_dep (consumer, insn);
3998 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4000 ready_list = alloc_INSN_LIST (consumer, ready_list);
4001 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4004 link = INSN_DEPEND (insn);
4006 else
4008 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4010 link = XEXP (link, 1);
4014 insn = PREV_INSN (insn);
4016 while (insn != note);
4018 bitmap_clear (&in_ready);
4020 /* Try to add instructions to the ready or queue list. */
4021 for (link = ready_list; link; link = XEXP (link, 1))
4022 try_ready (XEXP (link, 0));
4023 free_INSN_LIST_list (&ready_list);
4025 /* Fix the jump's dependences. */
4026 insn = BB_HEAD (rec);
4027 jump = BB_END (rec);
4029 gcc_assert (LABEL_P (insn));
4030 insn = NEXT_INSN (insn);
4032 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4033 add_jump_dependencies (insn, jump);
4036 /* The function saves line notes at the beginning of block B. */
4037 static void
4038 associate_line_notes_with_blocks (basic_block b)
4040 rtx line;
4042 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4043 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4045 line_note_head[b->index] = line;
4046 break;
4048 /* Do a forward search as well, since we won't get to see the first
4049 notes in a basic block. */
4050 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4052 if (INSN_P (line))
4053 break;
4054 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4055 line_note_head[b->index] = line;
4059 /* Changes pattern of the INSN to NEW_PAT. */
4060 static void
4061 change_pattern (rtx insn, rtx new_pat)
4063 int t;
4065 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4066 gcc_assert (t);
4067 /* Invalidate INSN_COST, so it'll be recalculated. */
4068 INSN_COST (insn) = -1;
4069 /* Invalidate INSN_TICK, so it'll be recalculated. */
4070 INSN_TICK (insn) = INVALID_TICK;
4071 dfa_clear_single_insn_cache (insn);
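/* Illustrative sketch (not part of the pass): change_pattern resets
   INSN_COST and INSN_TICK to sentinel values so later queries
   recompute them.  The same invalidate-on-mutation pattern, reduced
   to a hypothetical `struct item' with one cached value.  */
#if 0
#define INVALID_COST (-1)

struct item { int payload; int cached_cost; };

/* Dummy stand-in for the real cost function.  */
static int
compute_cost (int payload)
{
  return payload * 2;
}

/* Every mutation invalidates the cache, like INSN_COST (insn) = -1.  */
static void
set_payload (struct item *it, int payload)
{
  it->payload = payload;
  it->cached_cost = INVALID_COST;
}

/* Queries recompute lazily on the sentinel.  */
static int
get_cost (struct item *it)
{
  if (it->cached_cost == INVALID_COST)
    it->cached_cost = compute_cost (it->payload);
  return it->cached_cost;
}
#endif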
4075 /* -1 - can't speculate,
4076 0 - for speculation with REQUEST mode it is OK to use the
4077 current instruction pattern,
4078 1 - need to change the pattern for *NEW_PAT to be speculative. */
4079 static int
4080 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4082 gcc_assert (current_sched_info->flags & DO_SPECULATION
4083 && (request & SPECULATIVE));
4085 if (!NONJUMP_INSN_P (insn)
4086 || HAS_INTERNAL_DEP (insn)
4087 || SCHED_GROUP_P (insn)
4088 || side_effects_p (PATTERN (insn))
4089 || (request & spec_info->mask) != request)
4090 return -1;
4092 gcc_assert (!IS_SPECULATION_CHECK_P (insn));
4094 if (request & BE_IN_SPEC)
4096 if (may_trap_p (PATTERN (insn)))
4097 return -1;
4099 if (!(request & BEGIN_SPEC))
4100 return 0;
4103 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
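/* Illustrative sketch (not part of the pass): how a caller might
   consume the -1/0/1 protocol documented above.  `apply_speculation'
   is a hypothetical wrapper; the pass's real callers elsewhere in
   this file follow the same shape.  */
#if 0
static void
apply_speculation (rtx insn, ds_t request)
{
  rtx new_pat;

  switch (speculate_insn (insn, request, &new_pat))
    {
    case -1:
      /* Can't speculate: the dependence must stay hard.  */
      break;
    case 0:
      /* The existing pattern already satisfies REQUEST.  */
      break;
    case 1:
      /* A speculative pattern was produced; install it.  */
      change_pattern (insn, new_pat);
      break;
    }
}
#endif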
4106 /* Print some information about block BB, which starts with HEAD and
4107 ends with TAIL, before scheduling it.
4108 I is zero if the scheduler is about to start with a fresh ebb. */
4109 static void
4110 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4112 if (!i)
4113 fprintf (sched_dump,
4114 ";; ======================================================\n");
4115 else
4116 fprintf (sched_dump,
4117 ";; =====================ADVANCING TO=====================\n");
4118 fprintf (sched_dump,
4119 ";; -- basic block %d from %d to %d -- %s reload\n",
4120 bb->index, INSN_UID (head), INSN_UID (tail),
4121 (reload_completed ? "after" : "before"));
4122 fprintf (sched_dump,
4123 ";; ======================================================\n");
4124 fprintf (sched_dump, "\n");
4127 /* Unlink basic block notes and labels and save them, so they
4128 can be easily restored. We unlink basic block notes in the EBB to
4129 provide backward compatibility with the previous code, as target
4130 backends assume that there'll be only instructions between
4131 current_sched_info->{head and tail}. We restore these notes as
4132 soon as we can.
4133 FIRST (LAST) is the first (last) basic block in the ebb.
4134 NB: In the usual case (FIRST == LAST) nothing is really done. */
4135 void
4136 unlink_bb_notes (basic_block first, basic_block last)
4138 /* We DON'T unlink basic block notes of the first block in the ebb. */
4139 if (first == last)
4140 return;
4142 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4144 /* Make a sentinel. */
4145 if (last->next_bb != EXIT_BLOCK_PTR)
4146 bb_header[last->next_bb->index] = 0;
4148 first = first->next_bb;
4151 rtx prev, label, note, next;
4153 label = BB_HEAD (last);
4154 if (LABEL_P (label))
4155 note = NEXT_INSN (label);
4156 else
4157 note = label;
4158 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4160 prev = PREV_INSN (label);
4161 next = NEXT_INSN (note);
4162 gcc_assert (prev && next);
4164 NEXT_INSN (prev) = next;
4165 PREV_INSN (next) = prev;
4167 bb_header[last->index] = label;
4169 if (last == first)
4170 break;
4172 last = last->prev_bb;
4174 while (1);
4177 /* Restore basic block notes.
4178 FIRST is the first basic block in the ebb. */
4179 static void
4180 restore_bb_notes (basic_block first)
4182 if (!bb_header)
4183 return;
4185 /* We DON'T unlink basic block notes of the first block in the ebb. */
4186 first = first->next_bb;
4187 /* Remember: FIRST is actually the second basic block in the ebb. */
4189 while (first != EXIT_BLOCK_PTR
4190 && bb_header[first->index])
4192 rtx prev, label, note, next;
4194 label = bb_header[first->index];
4195 prev = PREV_INSN (label);
4196 next = NEXT_INSN (prev);
4198 if (LABEL_P (label))
4199 note = NEXT_INSN (label);
4200 else
4201 note = label;
4202 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4204 bb_header[first->index] = 0;
4206 NEXT_INSN (prev) = label;
4207 NEXT_INSN (note) = next;
4208 PREV_INSN (next) = note;
4210 first = first->next_bb;
4213 free (bb_header);
4214 bb_header = 0;
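/* Illustrative sketch (not part of the pass): unlink_bb_notes splices
   a label/note span out of the doubly linked insn chain without
   touching the span's own links, and restore_bb_notes relies on the
   detached label's stale PREV_INSN to find the splice point again.
   The same two operations on a hypothetical plain doubly linked
   list: */
#if 0
struct node { struct node *prev, *next; };

/* Detach the span FIRST..LAST; both neighbors must exist (cf. the
   gcc_assert (prev && next) above).  The span keeps its internal and
   boundary links, so it can be re-inserted later.  */
static void
splice_out (struct node *first, struct node *last)
{
  first->prev->next = last->next;
  last->next->prev = first->prev;
}

/* Re-insert the span; PREV is recovered from first->prev, exactly as
   restore_bb_notes recovers it from the saved label.  */
static void
splice_in (struct node *first, struct node *last)
{
  struct node *prev = first->prev;
  struct node *next = prev->next;

  prev->next = first;
  last->next = next;
  next->prev = last;
}
#endif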
4217 /* Extend per basic block data structures of the scheduler.
4218 If BB is NULL, initialize structures for the whole CFG.
4219 Otherwise, initialize them for the just created BB. */
4220 static void
4221 extend_bb (basic_block bb)
4223 rtx insn;
4225 if (write_symbols != NO_DEBUG)
4227 /* Save-line-note-head:
4228 Determine the line-number at the start of each basic block.
4229 This must be computed and saved now, because after a basic block's
4230 predecessor has been scheduled, it is impossible to accurately
4231 determine the correct line number for the first insn of the block. */
4232 line_note_head = xrecalloc (line_note_head, last_basic_block,
4233 old_last_basic_block,
4234 sizeof (*line_note_head));
4236 if (bb)
4237 associate_line_notes_with_blocks (bb);
4238 else
4239 FOR_EACH_BB (bb)
4240 associate_line_notes_with_blocks (bb);
4243 old_last_basic_block = last_basic_block;
4245 if (current_sched_info->flags & USE_GLAT)
4247 glat_start = xrealloc (glat_start,
4248 last_basic_block * sizeof (*glat_start));
4249 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4252 /* The following is done to keep current_sched_info->next_tail non-null. */
4254 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4255 if (NEXT_INSN (insn) == 0
4256 || (!NOTE_P (insn)
4257 && !LABEL_P (insn)
4258 /* Don't emit a NOTE if it would end up before a BARRIER. */
4259 && !BARRIER_P (NEXT_INSN (insn))))
4261 emit_note_after (NOTE_INSN_DELETED, insn);
4262 /* Make insn appear outside BB. */
4263 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
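/* Illustrative sketch (not part of the pass): extend_bb grows its
   per-basic-block arrays with xrecalloc, i.e. realloc plus zeroing of
   the newly added tail.  Equivalent semantics in plain stdlib C;
   `grow_array' is hypothetical, and like libiberty's xrealloc it
   aborts instead of returning NULL.  */
#if 0
#include <stdlib.h>
#include <string.h>

static void *
grow_array (void *p, size_t new_n, size_t old_n, size_t elt_size)
{
  void *q = realloc (p, new_n * elt_size);

  if (q == NULL)
    abort ();
  /* Clear only the newly added elements.  */
  if (new_n > old_n)
    memset ((char *) q + old_n * elt_size, 0,
            (new_n - old_n) * elt_size);
  return q;
}
#endif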
4267 /* Add a basic block BB to extended basic block EBB.
4268 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4269 If EBB is NULL, then BB should be a new region. */
4270 void
4271 add_block (basic_block bb, basic_block ebb)
4273 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4274 && bb->il.rtl->global_live_at_start == 0
4275 && bb->il.rtl->global_live_at_end == 0);
4277 extend_bb (bb);
4279 glat_start[bb->index] = 0;
4280 glat_end[bb->index] = 0;
4282 if (current_sched_info->add_block)
4283 /* This changes only data structures of the front-end. */
4284 current_sched_info->add_block (bb, ebb);
4287 /* Helper function.
4288 Fix CFG after both in- and inter-block movement of
4289 control_flow_insn_p JUMP. */
4290 static void
4291 fix_jump_move (rtx jump)
4293 basic_block bb, jump_bb, jump_bb_next;
4295 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4296 jump_bb = BLOCK_FOR_INSN (jump);
4297 jump_bb_next = jump_bb->next_bb;
4299 gcc_assert (current_sched_info->flags & SCHED_EBB
4300 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4302 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4303 /* If jump_bb_next is not empty. */
4304 BB_END (jump_bb) = BB_END (jump_bb_next);
4306 if (BB_END (bb) != PREV_INSN (jump))
4307 /* Then there are instructions after jump that should be moved
4308 to jump_bb_next. */
4309 BB_END (jump_bb_next) = BB_END (bb);
4310 else
4311 /* Otherwise jump_bb_next is empty. */
4312 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4314 /* To make the assertion in move_insn happy. */
4315 BB_END (bb) = PREV_INSN (jump);
4317 update_bb_for_insn (jump_bb_next);
4320 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4321 static void
4322 move_block_after_check (rtx jump)
4324 basic_block bb, jump_bb, jump_bb_next;
4325 VEC(edge,gc) *t;
4327 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4328 jump_bb = BLOCK_FOR_INSN (jump);
4329 jump_bb_next = jump_bb->next_bb;
4331 update_bb_for_insn (jump_bb);
4333 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4334 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4336 unlink_block (jump_bb_next);
4337 link_block (jump_bb_next, bb);
4339 t = bb->succs;
4340 bb->succs = 0;
4341 move_succs (&(jump_bb->succs), bb);
4342 move_succs (&(jump_bb_next->succs), jump_bb);
4343 move_succs (&t, jump_bb_next);
4345 if (current_sched_info->fix_recovery_cfg)
4346 current_sched_info->fix_recovery_cfg
4347 (bb->index, jump_bb->index, jump_bb_next->index);
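/* Illustrative sketch (not part of the pass): the three move_succs
   calls above rotate the successor vectors of bb, jump_bb and
   jump_bb_next through a temporary (move_succs below additionally
   re-points each edge's source).  The rotation itself, reduced to
   plain pointers: */
#if 0
static void
rotate3 (void **a, void **b, void **c)
{
  void *t = *a;  /* t = bb->succs */

  *a = *b;       /* bb takes jump_bb's successors */
  *b = *c;       /* jump_bb takes jump_bb_next's successors */
  *c = t;        /* jump_bb_next takes bb's original successors */
}
#endif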
4350 /* Helper function for move_block_after_check.
4351 This function attaches the edge vector pointed to by SUCCSP to
4352 block TO. */
4353 static void
4354 move_succs (VEC(edge,gc) **succsp, basic_block to)
4356 edge e;
4357 edge_iterator ei;
4359 gcc_assert (to->succs == 0);
4361 to->succs = *succsp;
4363 FOR_EACH_EDGE (e, ei, to->succs)
4364 e->src = to;
4366 *succsp = 0;
4369 /* Initialize GLAT (global_live_at_{start, end}) structures.
4370 GLAT structures are used to substitute global_live_at_{start, end}
4371 regsets during scheduling. This is necessary to use such functions as
4372 split_block (), as they assume consistency of register live information. */
4373 static void
4374 init_glat (void)
4376 basic_block bb;
4378 FOR_ALL_BB (bb)
4379 init_glat1 (bb);
4382 /* Helper function for init_glat. */
4383 static void
4384 init_glat1 (basic_block bb)
4386 gcc_assert (bb->il.rtl->global_live_at_start != 0
4387 && bb->il.rtl->global_live_at_end != 0);
4389 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4390 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4392 if (current_sched_info->flags & DETACH_LIFE_INFO)
4394 bb->il.rtl->global_live_at_start = 0;
4395 bb->il.rtl->global_live_at_end = 0;
4399 /* Attach reg_live_info back to basic blocks.
4400 Also save regsets that should not have been changed during
4401 scheduling, for checking purposes (see check_reg_live). */
4402 void
4403 attach_life_info (void)
4405 basic_block bb;
4407 FOR_ALL_BB (bb)
4408 attach_life_info1 (bb);
4411 /* Helper function for attach_life_info. */
4412 static void
4413 attach_life_info1 (basic_block bb)
4415 gcc_assert (bb->il.rtl->global_live_at_start == 0
4416 && bb->il.rtl->global_live_at_end == 0);
4418 if (glat_start[bb->index])
4420 gcc_assert (glat_end[bb->index]);
4422 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4423 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4425 /* Make them NULL, so they won't be freed in free_glat. */
4426 glat_start[bb->index] = 0;
4427 glat_end[bb->index] = 0;
4429 #ifdef ENABLE_CHECKING
4430 if (bb->index < NUM_FIXED_BLOCKS
4431 || current_sched_info->region_head_or_leaf_p (bb, 0))
4433 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4434 COPY_REG_SET (glat_start[bb->index],
4435 bb->il.rtl->global_live_at_start);
4438 if (bb->index < NUM_FIXED_BLOCKS
4439 || current_sched_info->region_head_or_leaf_p (bb, 1))
4441 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4442 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4444 #endif
4446 else
4448 gcc_assert (!glat_end[bb->index]);
4450 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4451 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
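/* Illustrative sketch (not part of the pass): init_glat1 and
   attach_life_info1 hand ownership of the live regsets back and
   forth between the blocks and the glat_{start,end} side tables,
   nulling the vacated slot so free_glat can skip it.  The handoff on
   hypothetical types: */
#if 0
struct owner { void *live; };

static void *side_table[16];

/* Detach, as in init_glat1 under DETACH_LIFE_INFO.  */
static void
detach (struct owner *o, int i)
{
  side_table[i] = o->live;
  o->live = 0;          /* the owner no longer frees it */
}

/* Reattach, as in attach_life_info1.  */
static void
attach (struct owner *o, int i)
{
  o->live = side_table[i];
  side_table[i] = 0;    /* so the cleanup pass skips this slot */
}
#endif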
4455 /* Free GLAT information. */
4456 static void
4457 free_glat (void)
4459 #ifdef ENABLE_CHECKING
4460 if (current_sched_info->flags & DETACH_LIFE_INFO)
4462 basic_block bb;
4464 FOR_ALL_BB (bb)
4466 if (glat_start[bb->index])
4467 FREE_REG_SET (glat_start[bb->index]);
4468 if (glat_end[bb->index])
4469 FREE_REG_SET (glat_end[bb->index]);
4472 #endif
4474 free (glat_start);
4475 free (glat_end);
4478 /* Remove INSN from the instruction stream.
4479 INSN should not have any dependencies. */
4480 static void
4481 sched_remove_insn (rtx insn)
4483 change_queue_index (insn, QUEUE_NOWHERE);
4484 current_sched_info->add_remove_insn (insn, 1);
4485 remove_insn (insn);
4488 /* Recursively clear the priorities of all instructions that INSN
4489 depends on; their priorities were computed using INSN's. */
4490 static void
4491 clear_priorities (rtx insn)
4493 rtx link;
4495 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4497 rtx pro;
4499 pro = XEXP (link, 0);
4500 if (INSN_PRIORITY_KNOWN (pro))
4502 INSN_PRIORITY_KNOWN (pro) = 0;
4503 clear_priorities (pro);
4508 /* Recompute the priorities of instructions whose priorities might
4509 have changed due to changes in INSN. */
4510 static void
4511 calc_priorities (rtx insn)
4513 rtx link;
4515 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4517 rtx pro;
4519 pro = XEXP (link, 0);
4520 if (!INSN_PRIORITY_KNOWN (pro))
4522 priority (pro);
4523 calc_priorities (pro);
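/* Illustrative sketch (not part of the pass): clear_priorities and
   calc_priorities walk the backward dependence graph, first
   invalidating a "known" flag and then recomputing guarded by that
   flag.  A minimal version on a hypothetical DAG node type, with a
   dummy priority function: */
#if 0
struct dag_node
{
  int priority;
  int known;                /* plays the role of INSN_PRIORITY_KNOWN */
  int n_preds;
  struct dag_node **preds;  /* producers, like LOG_LINKS */
};

/* Dummy stand-in for priority ().  */
static int
compute_priority (struct dag_node *n)
{
  return n->n_preds;
}

static void
clear_prio (struct dag_node *n)
{
  int i;

  for (i = 0; i < n->n_preds; i++)
    if (n->preds[i]->known)
      {
        n->preds[i]->known = 0;
        clear_prio (n->preds[i]);
      }
}

static void
calc_prio (struct dag_node *n)
{
  int i;

  for (i = 0; i < n->n_preds; i++)
    if (!n->preds[i]->known)
      {
        n->preds[i]->priority = compute_priority (n->preds[i]);
        n->preds[i]->known = 1;
        calc_prio (n->preds[i]);
      }
}
#endif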
4529 /* Add dependences between JUMP and other instructions in the recovery
4530 block. INSN is the first insn in the recovery block. */
4531 static void
4532 add_jump_dependencies (rtx insn, rtx jump)
4536 insn = NEXT_INSN (insn);
4537 if (insn == jump)
4538 break;
4540 if (!INSN_DEPEND (insn))
4541 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4543 while (1);
4544 gcc_assert (LOG_LINKS (jump));
4547 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4548 rtx
4549 bb_note (basic_block bb)
4551 rtx note;
4553 note = BB_HEAD (bb);
4554 if (LABEL_P (note))
4555 note = NEXT_INSN (note);
4557 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4558 return note;
4561 #ifdef ENABLE_CHECKING
4562 extern void debug_spec_status (ds_t);
4564 /* Dump information about the dependence status S. */
4565 void
4566 debug_spec_status (ds_t s)
4568 FILE *f = stderr;
4570 if (s & BEGIN_DATA)
4571 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4572 if (s & BE_IN_DATA)
4573 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4574 if (s & BEGIN_CONTROL)
4575 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4576 if (s & BE_IN_CONTROL)
4577 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4579 if (s & HARD_DEP)
4580 fprintf (f, "HARD_DEP; ");
4582 if (s & DEP_TRUE)
4583 fprintf (f, "DEP_TRUE; ");
4584 if (s & DEP_ANTI)
4585 fprintf (f, "DEP_ANTI; ");
4586 if (s & DEP_OUTPUT)
4587 fprintf (f, "DEP_OUTPUT; ");
4589 fprintf (f, "\n");
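/* Illustrative sketch (not part of the pass): get_dep_weak and
   set_dep_weak, used above, pack a small "weakness" value into a
   bitfield of the status word for each speculation type.  A generic
   shift-and-mask version with an illustrative layout; the offset and
   width here are NOT GCC's actual ds_t encoding.  */
#if 0
typedef unsigned int status_t;

#define FIELD_WIDTH 8
#define FIELD_MASK  ((1u << FIELD_WIDTH) - 1)

/* Extract the field starting at bit OFFSET.  */
static int
get_weak (status_t s, int offset)
{
  return (int) ((s >> offset) & FIELD_MASK);
}

/* Store W into the field starting at bit OFFSET.  */
static status_t
set_weak (status_t s, int offset, int w)
{
  s &= ~(FIELD_MASK << offset);
  return s | (((status_t) w & FIELD_MASK) << offset);
}
#endif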
4592 /* Helper function for check_cfg.
4593 Return nonzero if the edge vector pointed to by EL has an edge
4594 with TYPE in its flags. */
4595 static int
4596 has_edge_p (VEC(edge,gc) *el, int type)
4598 edge e;
4599 edge_iterator ei;
4601 FOR_EACH_EDGE (e, ei, el)
4602 if (e->flags & type)
4603 return 1;
4604 return 0;
4607 /* Check a few properties of the CFG between HEAD and TAIL.
4608 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4609 instruction stream. */
4610 static void
4611 check_cfg (rtx head, rtx tail)
4613 rtx next_tail;
4614 basic_block bb = 0;
4615 int not_first = 0, not_last;
4617 if (head == NULL)
4618 head = get_insns ();
4619 if (tail == NULL)
4620 tail = get_last_insn ();
4621 next_tail = NEXT_INSN (tail);
4625 not_last = head != tail;
4627 if (not_first)
4628 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4629 if (not_last)
4630 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4632 if (LABEL_P (head)
4633 || (NOTE_INSN_BASIC_BLOCK_P (head)
4634 && (!not_first
4635 || (not_first && !LABEL_P (PREV_INSN (head))))))
4637 gcc_assert (bb == 0);
4638 bb = BLOCK_FOR_INSN (head);
4639 if (bb != 0)
4640 gcc_assert (BB_HEAD (bb) == head);
4641 else
4642 /* This is the case of a jump table. See inside_basic_block_p (). */
4643 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4646 if (bb == 0)
4648 gcc_assert (!inside_basic_block_p (head));
4649 head = NEXT_INSN (head);
4651 else
4653 gcc_assert (inside_basic_block_p (head)
4654 || NOTE_P (head));
4655 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4657 if (LABEL_P (head))
4659 head = NEXT_INSN (head);
4660 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4662 else
4664 if (control_flow_insn_p (head))
4666 gcc_assert (BB_END (bb) == head);
4668 if (any_uncondjump_p (head))
4669 gcc_assert (EDGE_COUNT (bb->succs) == 1
4670 && BARRIER_P (NEXT_INSN (head)));
4671 else if (any_condjump_p (head))
4672 gcc_assert (/* Usual case. */
4673 (EDGE_COUNT (bb->succs) > 1
4674 && !BARRIER_P (NEXT_INSN (head)))
4675 /* Or jump to the next instruction. */
4676 || (EDGE_COUNT (bb->succs) == 1
4677 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4678 == JUMP_LABEL (head))));
4680 if (BB_END (bb) == head)
4682 if (EDGE_COUNT (bb->succs) > 1)
4683 gcc_assert (control_flow_insn_p (head)
4684 || has_edge_p (bb->succs, EDGE_COMPLEX));
4685 bb = 0;
4688 head = NEXT_INSN (head);
4692 not_first = 1;
4694 while (head != next_tail);
4696 gcc_assert (bb == 0);
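/* Illustrative sketch (not part of the pass): the first assertions in
   check_cfg verify that the insn chain's forward and backward links
   agree (NEXT_INSN (PREV_INSN (head)) == head and vice versa).  The
   same invariant on a hypothetical doubly linked list: */
#if 0
#include <assert.h>
#include <stddef.h>

struct node { struct node *prev, *next; };

static void
check_links (struct node *head)
{
  struct node *n;

  for (n = head; n != NULL; n = n->next)
    {
      if (n->prev)
        assert (n->prev->next == n);
      if (n->next)
        assert (n->next->prev == n);
    }
}
#endif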
4699 /* Perform a few consistency checks of flags in different data structures. */
4700 static void
4701 check_sched_flags (void)
4703 unsigned int f = current_sched_info->flags;
4705 if (flag_sched_stalled_insns)
4706 gcc_assert (!(f & DO_SPECULATION));
4707 if (f & DO_SPECULATION)
4708 gcc_assert (!flag_sched_stalled_insns
4709 && (f & DETACH_LIFE_INFO)
4710 && spec_info
4711 && spec_info->mask);
4712 if (f & DETACH_LIFE_INFO)
4713 gcc_assert (f & USE_GLAT);
4716 /* Check global_live_at_{start, end} regsets.
4717 If FATAL_P is TRUE, then abort execution at the first failure.
4718 Otherwise, print diagnostics to STDERR (this mode is for calling
4719 from the debugger). */
4720 void
4721 check_reg_live (bool fatal_p)
4723 basic_block bb;
4725 FOR_ALL_BB (bb)
4727 int i;
4729 i = bb->index;
4731 if (glat_start[i])
4733 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4734 glat_start[i]);
4736 if (!b)
4738 gcc_assert (!fatal_p);
4740 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4744 if (glat_end[i])
4746 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4747 glat_end[i]);
4749 if (!b)
4751 gcc_assert (!fatal_p);
4753 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4758 #endif /* ENABLE_CHECKING */
4760 #endif /* INSN_SCHEDULING */