/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block motion over interblock motion, ties broken by
   4.  prefer useful motion over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it, or
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is OK,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live-register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
126 #include "config.h"
127 #include "system.h"
128 #include "coretypes.h"
129 #include "tm.h"
130 #include "toplev.h"
131 #include "rtl.h"
132 #include "tm_p.h"
133 #include "hard-reg-set.h"
134 #include "regs.h"
135 #include "function.h"
136 #include "flags.h"
137 #include "insn-config.h"
138 #include "insn-attr.h"
139 #include "except.h"
140 #include "toplev.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "output.h"
145 #include "params.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched_verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dSR).  */
FILE *sched_dump = 0;
/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)    (h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)    (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)   (h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1
/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;
/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_DEPEND of
   the unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the base of maximal time of functional unit reservations and
   getting a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue nor the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
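
/* For illustration: since max_insn_queue_index is a power of two minus
   one, the queue wraps with a simple mask.  E.g., suppose
   max_insn_queue_index == 7 and q_ptr == 6; an insn delayed 3 cycles
   lands in bucket NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1, and its
   QUEUE_INDEX is that bucket number, while scheduled, unqueued and
   ready insns use the negative markers above.  */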
/* The following variable refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;
/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
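
/* For illustration of the layout: suppose veclen == 8, n_ready == 3
   and first == 7.  The ready insns then occupy vec[5..7];
   ready_lastpos () below returns &vec[5], ready_element (ready, 0) is
   vec[7] (the highest-priority insn) and ready_element (ready, 2) is
   vec[5] (the lowest).  */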
/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;
static int may_trap_exp (rtx, int);

/* Nonzero iff the address is comprised of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
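
/* For example, the macro accepts (reg 100),
   (plus (reg 100) (const_int 8)) and (lo_sum (reg 100) (symbol_ref)),
   but rejects (plus (reg 100) (reg 101)), whose value depends on two
   registers.  */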
/* Returns a class that an insn with GET_DEST(insn)=x may belong to,
   as found by analyzing the insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
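
/* Examples of the resulting classes: a volatile load is IRISKY; a load
   that cannot trap at all is IFREE; a possibly-trapping load from
   (mem (reg)) is PFREE_CANDIDATE (a single base register, to be checked
   further); a possibly-trapping load from (mem (plus (reg) (reg))) is
   PRISKY_CANDIDATE.  */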
/* Classifies an insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
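
/* E.g., (set (mem:SI (reg:SI 100)) (reg:SI 101)) with a possibly
   trapping address classifies as TRAP_RISKY (it is a store), while a
   simple register-to-register SET stays TRAP_FREE.  */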
/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation.)  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif
#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
		     link, used);
}
/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, the dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for the
   targetm.sched.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
	{
	  gcc_assert (link);
	  if (targetm.sched.adjust_cost)
	    cost = targetm.sched.adjust_cost (used, link, insn, cost);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
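
/* A worked example of the rules above: for a true dependence the cost
   is the producer's default latency (say 3 cycles); for an
   anti-dependence it is 0; for an output dependence between a 3-cycle
   producer and a 1-cycle consumer it is 3 - 1 == 2, and never less
   than 1.  Target hooks may then adjust the value further.  */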
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently than for normal instructions.  Instead of walking
	     through the INSN_DEPEND (check) list, we walk through the
	     INSN_DEPEND list of each instruction in the corresponding
	     recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
		{
		  rtx next;
		  int next_priority;

		  next = XEXP (link, 0);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      /* Critical path is meaningful in block boundaries
			 only.  */
		      if (! (*current_sched_info->contributes_to_priority)
			  (next, insn)
			  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
			     then speculative instructions will less likely be
			     scheduled.  That is because the priority of
			     their producers will increase, and, thus, the
			     producers will more likely be scheduled, thus,
			     resolving the dependence.  */
			  || ((current_sched_info->flags & DO_SPECULATION)
			      && (DEP_STATUS (link) & SPECULATIVE)
			      && !(spec_info->flags
				   & COUNT_SPEC_IN_CRITICAL_PATH)))
			continue;

		      next_priority = insn_cost1 (insn,
						  twin == insn ?
						  REG_NOTE_KIND (link) :
						  REG_DEP_ANTI,
						  twin == insn ? link : 0,
						  next) + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
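
/* E.g., for a chain A -> B -> C of true dependences where every insn
   has latency 1, the recursion yields priority (C) == 1 (its own
   cost), priority (B) == 2 and priority (A) == 3: the length of the
   critical path measured in cycles.  */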
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
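
/* ready_sort () below invokes this macro on the ready vector, so a
   two-element list takes the cheap swap_sort () path while longer
   lists fall back to qsort () with rank_for_schedule () as the
   comparison function.  */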
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer a speculative insn with greater dependency weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = dep_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = dep_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with a smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
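
/* The comparison falls through the tie-breakers in the order given in
   the file header: e.g., two insns with equal INSN_PRIORITY are ordered
   by speculation weakness, then (before reload) by INSN_REG_WEIGHT, and
   only full ties reach the final INSN_LUID comparison that keeps the
   qsort deterministic.  */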
/* Resort the array A in which only the element at index N may be out of
   order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
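
/* E.g., if a[] is sorted except for the newly added last element, one
   pass of the loop above sifts that element down to its proper place;
   SCHED_SORT uses this for the common two-element case.  */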
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve the insn
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
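
/* E.g., queue_insn (insn, 2) links INSN into the bucket two cycles
   ahead of q_ptr; after queue_to_ready () has advanced q_ptr twice,
   the bucket is drained and INSN moves to the ready list.  */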
/* Remove INSN from the queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn that
   permits issuing the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX
   for the insn with the highest priority is 0, and for the one with
   the lowest priority it is N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for the insn with the highest priority is 0, and for the one with
   the lowest priority it is N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */
static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* The scheduled instruction should have all its dependencies resolved
     and should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));
  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);

  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* A check always has only one forward dependence (to the first insn
	   in the recovery block), therefore, this will be executed only
	   once.  */
	{
	  gcc_assert (XEXP (link, 1) == 0);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      if (bb)
	{
	  /* Basic block can begin with either LABEL or
	     NOTE_INSN_BASIC_BLOCK.  */
	  gcc_assert (BB_HEAD (bb) != insn);

	  /* Check if we are removing the last insn in the BB.  */
	  if (BB_END (bb) == insn)
	    BB_END (bb) = prev;
	}

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}
/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
	  basic_block bb = BLOCK_FOR_INSN (insn);

	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

	  if (bb)
	    {
	      /* Basic block can begin with either LABEL or
		 NOTE_INSN_BASIC_BLOCK.  */
	      gcc_assert (BB_HEAD (bb) != insn);

	      /* Check if we are removing the last insn in the BB.  */
	      if (BB_END (bb) == insn)
		BB_END (bb) = prev;
	    }

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of the ebb starting at BEG and
   ending at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete line notes from one block.  Save them so they can be later
   restored (in restore_line_notes).  HEAD and TAIL are the boundaries
   of the block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
	 This is needed to keep the debugger from
	 getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}
/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  head = head;
  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    /* This used to emit line number notes before every non-deleted note.
       However, this confuses a debugger, because line notes not separated
       by real instructions all end up at the same address.  I can find no
       use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	    set_block_for_insn (note, BLOCK_FOR_INSN (insn));
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}
/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
		 )
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
	 This is needed to keep the debugger from
	 getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Functions for computation of register live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}
/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
/* Move insns that became ready to fire from the queue to the ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn))
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead, or in
   addition, depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn);
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* non-negative to indicate that it's not ready
		   to avoid infinite Q->R->Q->R...  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* move from Q to R */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}
1912 /* Search INSN for REG_SAVE_NOTE note pairs for
1913 NOTE_INSN_EH_REGION_{BEG,END}; and convert them back into
1914 NOTEs. The REG_SAVE_NOTE note following the first one contains the
1915 saved value for NOTE_BLOCK_NUMBER which is useful for
1916 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1918 static void
1919 reemit_notes (rtx insn)
1921 rtx note, last = insn;
1923 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1925 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1927 enum insn_note note_type = INTVAL (XEXP (note, 0));
1929 last = emit_note_before (note_type, last);
1930 remove_note (insn, note);
1935 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1936 static void
1937 move_insn (rtx insn)
1939 rtx last = last_scheduled_insn;
1941 if (PREV_INSN (insn) != last)
1943 basic_block bb;
1944 rtx note;
1945 int jump_p = 0;
1947 bb = BLOCK_FOR_INSN (insn);
1949 /* BB_HEAD is either LABEL or NOTE. */
1950 gcc_assert (BB_HEAD (bb) != insn);
1952 if (BB_END (bb) == insn)
1953 /* If this is the last instruction in BB, move the end marker one
1954 instruction up. */
1956 /* Jumps are always placed at the end of a basic block. */
1957 jump_p = control_flow_insn_p (insn);
1959 gcc_assert (!jump_p
1960 || ((current_sched_info->flags & SCHED_RGN)
1961 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
1962 || (current_sched_info->flags & SCHED_EBB));
1964 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1966 BB_END (bb) = PREV_INSN (insn);
1969 gcc_assert (BB_END (bb) != last);
1971 if (jump_p)
1972 /* We move the block note along with jump. */
1974 /* NT is needed for assertion below. */
1975 rtx nt = current_sched_info->next_tail;
1977 note = NEXT_INSN (insn);
1978 while (NOTE_NOT_BB_P (note) && note != nt)
1979 note = NEXT_INSN (note);
1981 if (note != nt
1982 && (LABEL_P (note)
1983 || BARRIER_P (note)))
1984 note = NEXT_INSN (note);
1986 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1988 else
1989 note = insn;
1991 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1992 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1994 NEXT_INSN (note) = NEXT_INSN (last);
1995 PREV_INSN (NEXT_INSN (last)) = note;
1997 NEXT_INSN (last) = insn;
1998 PREV_INSN (insn) = last;
2000 bb = BLOCK_FOR_INSN (last);
2002 if (jump_p)
2004 fix_jump_move (insn);
2006 if (BLOCK_FOR_INSN (insn) != bb)
2007 move_block_after_check (insn);
2009 gcc_assert (BB_END (bb) == last);
2012 set_block_for_insn (insn, bb);
2014 /* Update BB_END, if needed. */
2015 if (BB_END (bb) == last)
2016 BB_END (bb) = insn;
2019 reemit_notes (insn);
2021 SCHED_GROUP_P (insn) = 0;
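/* Illustrative sketch (hypothetical): the pointer surgery at the heart of
   move_insn on a generic doubly linked list -- unlink the span
   [INSN, NOTE] from its old position and reinsert it right after LAST,
   in the same order as the NEXT_INSN/PREV_INSN updates above.  */
#if 0
struct dnode { struct dnode *prev, *next; };

static void
move_span_after (struct dnode *insn, struct dnode *note, struct dnode *last)
{
  /* Unlink the span [INSN, NOTE].  */
  insn->prev->next = note->next;
  note->next->prev = insn->prev;

  /* Reinsert it between LAST and its old successor.  */
  note->next = last->next;
  last->next->prev = note;
  last->next = insn;
  insn->prev = last;
}
#endif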
2024 /* The following structure describes an entry of the stack of choices. */
2025 struct choice_entry
2027 /* Ordinal number of the issued insn in the ready queue. */
2028 int index;
2029 /* The number of remaining insns whose issue we should still try. */
2030 int rest;
2031 /* The number of issued essential insns. */
2032 int n;
2033 /* State after issuing the insn. */
2034 state_t state;
2037 /* The following array is used to implement a stack of choices used in
2038 function max_issue. */
2039 static struct choice_entry *choice_stack;
2041 /* The following variable holds the number of essential insns issued on
2042 the current cycle. An insn is essential if it changes the
2043 processor's state. */
2044 static int cycle_issued_insns;
2046 /* The following variable holds the maximal number of tries of issuing
2047 insns for the first cycle multipass insn scheduling. We define
2048 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2049 need this constraint if all real insns (with non-negative codes)
2050 had reservations, because in that case the algorithm complexity is
2051 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2052 might be incomplete and such insns might occur. For such
2053 descriptions, the complexity of the algorithm (without the constraint)
2054 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2055 static int max_lookahead_tries;
2057 /* The following is the value of the hook
2058 `first_cycle_multipass_dfa_lookahead' at the last call of
2059 `max_issue'. */
2060 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2062 /* The following is the value of `issue_rate' at the last call of
2063 `sched_init'. */
2064 static int cached_issue_rate = 0;
2066 /* The following function returns the maximal (or close to maximal) number
2067 of insns which can be issued on the same cycle, one of which is the
2068 insn with the best rank (the first insn in READY). To do this, the
2069 function tries different samples of ready insns. READY
2070 is the current queue `ready'. The global array READY_TRY reflects which
2071 insns are already issued in this try. MAX_POINTS is the sum of points
2072 of all instructions in READY. The function stops immediately
2073 if it reaches a solution in which all instructions can be issued.
2074 INDEX will contain the index of the best insn in READY. The
2075 function is used only for first cycle multipass scheduling. */
2076 static int
2077 max_issue (struct ready_list *ready, int *index, int max_points)
2079 int n, i, all, n_ready, best, delay, tries_num, points = -1;
2080 struct choice_entry *top;
2081 rtx insn;
2083 best = 0;
2084 memcpy (choice_stack->state, curr_state, dfa_state_size);
2085 top = choice_stack;
2086 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2087 top->n = 0;
2088 n_ready = ready->n_ready;
2089 for (all = i = 0; i < n_ready; i++)
2090 if (!ready_try [i])
2091 all++;
2092 i = 0;
2093 tries_num = 0;
2094 for (;;)
2096 if (top->rest == 0 || i >= n_ready)
2098 if (top == choice_stack)
2099 break;
2100 if (best < top - choice_stack && ready_try [0])
2102 best = top - choice_stack;
2103 *index = choice_stack [1].index;
2104 points = top->n;
2105 if (top->n == max_points || best == all)
2106 break;
2108 i = top->index;
2109 ready_try [i] = 0;
2110 top--;
2111 memcpy (curr_state, top->state, dfa_state_size);
2113 else if (!ready_try [i])
2115 tries_num++;
2116 if (tries_num > max_lookahead_tries)
2117 break;
2118 insn = ready_element (ready, i);
2119 delay = state_transition (curr_state, insn);
2120 if (delay < 0)
2122 if (state_dead_lock_p (curr_state))
2123 top->rest = 0;
2124 else
2125 top->rest--;
2126 n = top->n;
2127 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2128 n += ISSUE_POINTS (insn);
2129 top++;
2130 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2131 top->index = i;
2132 top->n = n;
2133 memcpy (top->state, curr_state, dfa_state_size);
2134 ready_try [i] = 1;
2135 i = -1;
2138 i++;
2140 while (top != choice_stack)
2142 ready_try [top->index] = 0;
2143 top--;
2145 memcpy (curr_state, choice_stack->state, dfa_state_size);
2147 if (sched_verbose >= 4)
2148 fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
2149 (*current_sched_info->print_insn) (ready_element (ready, *index),
2150 0),
2151 points, max_points);
2153 return best;
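/* Illustrative sketch (hypothetical): the search max_issue performs,
   written as a plain recursion instead of an explicit CHOICE_STACK, and
   without the max_lookahead_tries cutoff.  TAKEN marks insns already
   issued in this try (cf. ready_try); SLOTS_LEFT is a trivial issue-slot
   counter standing in for the DFA state.  */
#if 0
static int
best_issue (int *taken, int n_ready, int slots_left, int depth)
{
  int i, best = depth;

  if (slots_left == 0)
    return depth;

  for (i = 0; i < n_ready; i++)
    if (!taken[i])
      {
        int sub;

        taken[i] = 1;   /* Tentatively issue insn I...  */
        sub = best_issue (taken, n_ready, slots_left - 1, depth + 1);
        taken[i] = 0;   /* ...and backtrack.  */

        if (sub > best)
          best = sub;
      }

  return best;
}
#endif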
2156 /* The following function chooses an insn from READY, removing it and
2157 modifying READY accordingly. The function is used only for first
2158 cycle multipass scheduling. */
2160 static rtx
2161 choose_ready (struct ready_list *ready)
2163 int lookahead = 0;
2165 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2166 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2167 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2168 return ready_remove_first (ready);
2169 else
2171 /* Try to choose the best insn. */
2172 int index = 0, i, n;
2173 rtx insn;
2174 int more_issue, max_points, try_data = 1, try_control = 1;
2176 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2178 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2179 max_lookahead_tries = 100;
2180 for (i = 0; i < issue_rate; i++)
2181 max_lookahead_tries *= lookahead;
2183 insn = ready_element (ready, 0);
2184 if (INSN_CODE (insn) < 0)
2185 return ready_remove_first (ready);
2187 if (spec_info
2188 && spec_info->flags & (PREFER_NON_DATA_SPEC
2189 | PREFER_NON_CONTROL_SPEC))
2191 for (i = 0, n = ready->n_ready; i < n; i++)
2193 rtx x;
2194 ds_t s;
2196 x = ready_element (ready, i);
2197 s = TODO_SPEC (x);
2199 if (spec_info->flags & PREFER_NON_DATA_SPEC
2200 && !(s & DATA_SPEC))
2202 try_data = 0;
2203 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2204 || !try_control)
2205 break;
2208 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2209 && !(s & CONTROL_SPEC))
2211 try_control = 0;
2212 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2213 break;
2218 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2219 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2220 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2221 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2222 (insn)))
2223 /* Discard the speculative instruction that stands first in the ready
2224 list. */
2226 change_queue_index (insn, 1);
2227 return 0;
2230 max_points = ISSUE_POINTS (insn);
2231 more_issue = issue_rate - cycle_issued_insns - 1;
2233 for (i = 1; i < ready->n_ready; i++)
2235 insn = ready_element (ready, i);
2236 ready_try [i]
2237 = (INSN_CODE (insn) < 0
2238 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2239 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2240 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2241 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2242 (insn)));
2244 if (!ready_try [i] && more_issue-- > 0)
2245 max_points += ISSUE_POINTS (insn);
2248 if (max_issue (ready, &index, max_points) == 0)
2249 return ready_remove_first (ready);
2250 else
2251 return ready_remove (ready, index);
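/* Illustrative sketch (hypothetical): the bound recomputed above whenever
   the lookahead value changes.  E.g. with lookahead == 4 and
   issue_rate == 2, the search in max_issue is cut off after
   100 * 4 * 4 == 1600 tries.  */
#if 0
static int
compute_max_lookahead_tries (int lookahead, int issue_rate)
{
  int i, tries = 100;   /* The constant factor used by choose_ready.  */

  for (i = 0; i < issue_rate; i++)
    tries *= lookahead;

  return tries;         /* constant * DFA_LOOKAHEAD ** ISSUE_RATE  */
}
#endif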
2255 /* Use forward list scheduling to rearrange insns of block pointed to by
2256 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2257 region. */
2259 void
2260 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2262 struct ready_list ready;
2263 int i, first_cycle_insn_p;
2264 int can_issue_more;
2265 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2266 int sort_p, advance, start_clock_var;
2268 /* Head/tail info for this block. */
2269 rtx prev_head = current_sched_info->prev_head;
2270 rtx next_tail = current_sched_info->next_tail;
2271 rtx head = NEXT_INSN (prev_head);
2272 rtx tail = PREV_INSN (next_tail);
2274 /* We used to have code to avoid getting parameters moved from hard
2275 argument registers into pseudos.
2277 However, it was removed when it proved to be of marginal benefit
2278 and caused problems because schedule_block and compute_forward_dependences
2279 had different notions of what the "head" insn was. */
2281 gcc_assert (head != tail || INSN_P (head));
2283 added_recovery_block_p = false;
2285 /* Debug info. */
2286 if (sched_verbose)
2287 dump_new_block_header (0, *target_bb, head, tail);
2289 state_reset (curr_state);
2291 /* Allocate the ready list. */
2292 readyp = &ready;
2293 ready.vec = NULL;
2294 ready_try = NULL;
2295 choice_stack = NULL;
2297 rgn_n_insns = -1;
2298 extend_ready (rgn_n_insns1 + 1);
2300 ready.first = ready.veclen - 1;
2301 ready.n_ready = 0;
2303 /* It is used for first cycle multipass scheduling. */
2304 temp_state = alloca (dfa_state_size);
2306 if (targetm.sched.md_init)
2307 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2309 /* We start inserting insns after PREV_HEAD. */
2310 last_scheduled_insn = prev_head;
2312 gcc_assert (NOTE_P (last_scheduled_insn)
2313 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2315 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2316 queue. */
2317 q_ptr = 0;
2318 q_size = 0;
2320 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2321 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2323 /* Start just before the beginning of time. */
2324 clock_var = -1;
2326 /* We need the queue and ready lists and clock_var to be initialized
2327 in try_ready () (which is called through init_ready_list ()). */
2328 (*current_sched_info->init_ready_list) ();
2330 /* The algorithm is O(n^2) in the number of ready insns at any given
2331 time in the worst case. Before reload we are more likely to have
2332 big lists so truncate them to a reasonable size. */
2333 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2335 ready_sort (&ready);
2337 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2338 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2339 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2340 break;
2342 if (sched_verbose >= 2)
2344 fprintf (sched_dump,
2345 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2346 fprintf (sched_dump,
2347 ";;\t\t before reload => truncated to %d insns\n", i);
2350 /* Delay all insns past it for 1 cycle. */
2351 while (i < ready.n_ready)
2352 queue_insn (ready_remove (&ready, i), 1);
2355 /* Now we can restore basic block notes and maintain precise cfg. */
2356 restore_bb_notes (*target_bb);
2358 last_clock_var = -1;
2360 advance = 0;
2362 sort_p = TRUE;
2363 /* Loop until all the insns in BB are scheduled. */
2364 while ((*current_sched_info->schedule_more_p) ())
2368 start_clock_var = clock_var;
2370 clock_var++;
2372 advance_one_cycle ();
2374 /* Add to the ready list all pending insns that can be issued now.
2375 If there are no ready insns, increment clock until one
2376 is ready and add all pending insns at that point to the ready
2377 list. */
2378 queue_to_ready (&ready);
2380 gcc_assert (ready.n_ready);
2382 if (sched_verbose >= 2)
2384 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2385 debug_ready_list (&ready);
2387 advance -= clock_var - start_clock_var;
2389 while (advance > 0);
2391 if (sort_p)
2393 /* Sort the ready list based on priority. */
2394 ready_sort (&ready);
2396 if (sched_verbose >= 2)
2398 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2399 debug_ready_list (&ready);
2403 /* Allow the target to reorder the list, typically for
2404 better instruction bundling. */
2405 if (sort_p && targetm.sched.reorder
2406 && (ready.n_ready == 0
2407 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2408 can_issue_more =
2409 targetm.sched.reorder (sched_dump, sched_verbose,
2410 ready_lastpos (&ready),
2411 &ready.n_ready, clock_var);
2412 else
2413 can_issue_more = issue_rate;
2415 first_cycle_insn_p = 1;
2416 cycle_issued_insns = 0;
2417 for (;;)
2419 rtx insn;
2420 int cost;
2421 bool asm_p = false;
2423 if (sched_verbose >= 2)
2425 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2426 clock_var);
2427 debug_ready_list (&ready);
2430 if (ready.n_ready == 0
2431 && can_issue_more
2432 && reload_completed)
2434 /* Allow scheduling insns directly from the queue in case
2435 there's nothing better to do (ready list is empty) but
2436 there are still vacant dispatch slots in the current cycle. */
2437 if (sched_verbose >= 6)
2438 fprintf (sched_dump, ";;\t\tSecond chance\n");
2439 memcpy (temp_state, curr_state, dfa_state_size);
2440 if (early_queue_to_ready (temp_state, &ready))
2441 ready_sort (&ready);
2444 if (ready.n_ready == 0 || !can_issue_more
2445 || state_dead_lock_p (curr_state)
2446 || !(*current_sched_info->schedule_more_p) ())
2447 break;
2449 /* Select and remove the insn from the ready list. */
2450 if (sort_p)
2452 insn = choose_ready (&ready);
2453 if (!insn)
2454 continue;
2456 else
2457 insn = ready_remove_first (&ready);
2459 if (targetm.sched.dfa_new_cycle
2460 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2461 insn, last_clock_var,
2462 clock_var, &sort_p))
2463 /* SORT_P is used by the target to override sorting
2464 of the ready list. This is needed when the target
2465 has modified its internal structures expecting that
2466 the insn will be issued next. As we need the insn
2467 to have the highest priority (so it will be returned by
2468 the ready_remove_first call above), we invoke
2469 ready_add (&ready, insn, true).
2470 But, still, there is one issue: INSN can later be
2471 discarded by the scheduler's front end through
2472 current_sched_info->can_schedule_ready_p, and hence won't
2473 be issued next. */
2475 ready_add (&ready, insn, true);
2476 break;
2479 sort_p = TRUE;
2480 memcpy (temp_state, curr_state, dfa_state_size);
2481 if (recog_memoized (insn) < 0)
2483 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2484 || asm_noperands (PATTERN (insn)) >= 0);
2485 if (!first_cycle_insn_p && asm_p)
2486 /* This is an asm insn that we tried to issue on a cycle
2487 other than the first. Issue it on the next cycle. */
2488 cost = 1;
2489 else
2490 /* A USE insn, or something else we don't need to
2491 understand. We can't pass these directly to
2492 state_transition because it will trigger a
2493 fatal error for unrecognizable insns. */
2494 cost = 0;
2496 else
2498 cost = state_transition (temp_state, insn);
2499 if (cost < 0)
2500 cost = 0;
2501 else if (cost == 0)
2502 cost = 1;
2505 if (cost >= 1)
2507 queue_insn (insn, cost);
2508 if (SCHED_GROUP_P (insn))
2510 advance = cost;
2511 break;
2514 continue;
2517 if (current_sched_info->can_schedule_ready_p
2518 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2519 /* We normally get here only if we don't want to move
2520 insn from the split block. */
2522 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2523 continue;
2526 /* DECISION is made. */
2528 if (TODO_SPEC (insn) & SPECULATIVE)
2529 generate_recovery_code (insn);
2531 if (control_flow_insn_p (last_scheduled_insn)
2532 /* This is used to switch basic blocks by request
2533 from the scheduler front-end (actually, sched-ebb.c only).
2534 This is used to process blocks with a single fallthru
2535 edge. If the succeeding block has a jump, that jump would
2536 try to move to the end of the current bb, thus corrupting the CFG. */
2537 || current_sched_info->advance_target_bb (*target_bb, insn))
2539 *target_bb = current_sched_info->advance_target_bb
2540 (*target_bb, 0);
2542 if (sched_verbose)
2544 rtx x;
2546 x = next_real_insn (last_scheduled_insn);
2547 gcc_assert (x);
2548 dump_new_block_header (1, *target_bb, x, tail);
2551 last_scheduled_insn = bb_note (*target_bb);
2554 /* Update counters, etc in the scheduler's front end. */
2555 (*current_sched_info->begin_schedule_ready) (insn,
2556 last_scheduled_insn);
2558 move_insn (insn);
2559 last_scheduled_insn = insn;
2561 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2563 cycle_issued_insns++;
2564 memcpy (curr_state, temp_state, dfa_state_size);
2567 if (targetm.sched.variable_issue)
2568 can_issue_more =
2569 targetm.sched.variable_issue (sched_dump, sched_verbose,
2570 insn, can_issue_more);
2571 /* A naked CLOBBER or USE generates no instruction, so do
2572 not count them against the issue rate. */
2573 else if (GET_CODE (PATTERN (insn)) != USE
2574 && GET_CODE (PATTERN (insn)) != CLOBBER)
2575 can_issue_more--;
2577 advance = schedule_insn (insn);
2579 /* After issuing an asm insn we should start a new cycle. */
2580 if (advance == 0 && asm_p)
2581 advance = 1;
2582 if (advance != 0)
2583 break;
2585 first_cycle_insn_p = 0;
2587 /* Sort the ready list based on priority. This must be
2588 redone here, as schedule_insn may have readied additional
2589 insns that will not be sorted correctly. */
2590 if (ready.n_ready > 0)
2591 ready_sort (&ready);
2593 if (targetm.sched.reorder2
2594 && (ready.n_ready == 0
2595 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2597 can_issue_more =
2598 targetm.sched.reorder2 (sched_dump, sched_verbose,
2599 ready.n_ready
2600 ? ready_lastpos (&ready) : NULL,
2601 &ready.n_ready, clock_var);
2606 /* Debug info. */
2607 if (sched_verbose)
2609 fprintf (sched_dump, ";;\tReady list (final): ");
2610 debug_ready_list (&ready);
2613 if (current_sched_info->queue_must_finish_empty)
2614 /* Sanity check -- queue must be empty now. Meaningless if region has
2615 multiple bbs. */
2616 gcc_assert (!q_size && !ready.n_ready);
2617 else
2619 /* We must maintain QUEUE_INDEX between blocks in region. */
2620 for (i = ready.n_ready - 1; i >= 0; i--)
2622 rtx x;
2624 x = ready_element (&ready, i);
2625 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2626 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2629 if (q_size)
2630 for (i = 0; i <= max_insn_queue_index; i++)
2632 rtx link;
2633 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2635 rtx x;
2637 x = XEXP (link, 0);
2638 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2639 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2641 free_INSN_LIST_list (&insn_queue[i]);
2645 if (!current_sched_info->queue_must_finish_empty
2646 || added_recovery_block_p)
2648 /* INSN_TICK (minimum clock tick at which the insn becomes
2649 ready) may not be correct for the insns in the subsequent
2650 blocks of the region. We should use a correct value of
2651 `clock_var' or modify INSN_TICK. It is better to keep
2652 clock_var value equal to 0 at the start of a basic block.
2653 Therefore we modify INSN_TICK here. */
2654 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2657 if (targetm.sched.md_finish)
2658 targetm.sched.md_finish (sched_dump, sched_verbose);
2660 /* Update head/tail boundaries. */
2661 head = NEXT_INSN (prev_head);
2662 tail = last_scheduled_insn;
2664 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2665 previously found among the insns. Insert them at the beginning
2666 of the insns. */
2667 if (note_list != 0)
2669 basic_block head_bb = BLOCK_FOR_INSN (head);
2670 rtx note_head = note_list;
2672 while (PREV_INSN (note_head))
2674 set_block_for_insn (note_head, head_bb);
2675 note_head = PREV_INSN (note_head);
2677 /* In the loop above we've missed this note: */
2678 set_block_for_insn (note_head, head_bb);
2680 PREV_INSN (note_head) = PREV_INSN (head);
2681 NEXT_INSN (PREV_INSN (head)) = note_head;
2682 PREV_INSN (head) = note_list;
2683 NEXT_INSN (note_list) = head;
2684 head = note_head;
2687 /* Debugging. */
2688 if (sched_verbose)
2690 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2691 clock_var, INSN_UID (head));
2692 fprintf (sched_dump, ";; new tail = %d\n\n",
2693 INSN_UID (tail));
2696 current_sched_info->head = head;
2697 current_sched_info->tail = tail;
2699 free (ready.vec);
2701 free (ready_try);
2702 for (i = 0; i <= rgn_n_insns; i++)
2703 free (choice_stack [i].state);
2704 free (choice_stack);
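/* Illustrative sketch (hypothetical): a self-contained model of the
   clock/queue mechanics that drive the loop above.  An insn stalled for
   N cycles is parked N slots ahead in a circular queue (cf. queue_insn,
   queue_to_ready and NEXT_Q_AFTER) and drained as the clock advances.
   Real insn_queue[] slots hold lists of insns; one insn per slot here
   for brevity.  */
#if 0
#include <stdio.h>

#define NQ 8   /* Ring size, greater than the longest stall, like
                  max_insn_queue_index + 1.  */

int
main (void)
{
  int queue[NQ] = { 0 };        /* queue[slot] = 1 + insn id, 0 = empty.  */
  int clock, q_ptr = 0;

  queue[(q_ptr + 0) % NQ] = 1;  /* insn 0 is ready immediately.  */
  queue[(q_ptr + 2) % NQ] = 2;  /* insn 1 becomes ready in 2 cycles.  */
  queue[(q_ptr + 3) % NQ] = 3;  /* insn 2 becomes ready in 3 cycles.  */

  for (clock = 0; clock < NQ; clock++)
    {
      int slot = (q_ptr + clock) % NQ;  /* cf. NEXT_Q_AFTER (q_ptr, n).  */

      if (queue[slot])
        printf ("cycle %d: insn %d becomes ready\n", clock, queue[slot] - 1);
    }
  return 0;
}
#endif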
2707 /* Set_priorities: compute priority of each insn in the block. */
2710 set_priorities (rtx head, rtx tail)
2712 rtx insn;
2713 int n_insn;
2714 int sched_max_insns_priority =
2715 current_sched_info->sched_max_insns_priority;
2716 rtx prev_head;
2718 if (head == tail && (! INSN_P (head)))
2719 return 0;
2721 n_insn = 0;
2723 prev_head = PREV_INSN (head);
2724 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2726 if (!INSN_P (insn))
2727 continue;
2729 n_insn++;
2730 (void) priority (insn);
2732 if (INSN_PRIORITY_KNOWN (insn))
2733 sched_max_insns_priority =
2734 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2737 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2739 return n_insn;
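/* Illustrative sketch (hypothetical): the priority model behind
   set_priorities -- an insn's priority is its own cost plus the maximum
   priority among its consumers, i.e. the length of the longest dependence
   path to the end of the block.  Here consumers of insn I all have higher
   indices (at most 4 each, for brevity), so one reverse pass suffices;
   the real priority () computes this by recursion over INSN_DEPEND.  */
#if 0
static void
longest_path_priorities (const int *cost, int (*consumers)[4],
                         const int *n_consumers, int n, int *prio)
{
  int i, j;

  for (i = n - 1; i >= 0; i--)
    {
      int best = 0;

      /* Consumers have higher indices, so their priorities are known.  */
      for (j = 0; j < n_consumers[i]; j++)
        if (prio[consumers[i][j]] > best)
          best = prio[consumers[i][j]];

      prio[i] = cost[i] + best;
    }
}
#endif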
2742 /* Next LUID to assign to an instruction. */
2743 static int luid;
2745 /* Initialize some global state for the scheduler. */
2747 void
2748 sched_init (void)
2750 basic_block b;
2751 rtx insn;
2752 int i;
2754 /* Switch to working copy of sched_info. */
2755 memcpy (&current_sched_info_var, current_sched_info,
2756 sizeof (current_sched_info_var));
2757 current_sched_info = &current_sched_info_var;
2759 /* Speculative loads are not safe if cc0 is defined, so disable them. */
2760 #ifdef HAVE_cc0
2761 flag_schedule_speculative_load = 0;
2762 #endif
2764 /* Set dump and sched_verbose for the desired debugging output. If no
2765 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2766 For -fsched-verbose=N, N>=10, print everything to stderr. */
2767 sched_verbose = sched_verbose_param;
2768 if (sched_verbose_param == 0 && dump_file)
2769 sched_verbose = 1;
2770 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2771 ? stderr : dump_file);
2773 /* Initialize SPEC_INFO. */
2774 if (targetm.sched.set_sched_flags)
2776 spec_info = &spec_info_var;
2777 targetm.sched.set_sched_flags (spec_info);
2778 if (current_sched_info->flags & DO_SPECULATION)
2779 spec_info->weakness_cutoff =
2780 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2781 else
2782 /* So we won't read anything accidentally. */
2783 spec_info = 0;
2784 #ifdef ENABLE_CHECKING
2785 check_sched_flags ();
2786 #endif
2788 else
2789 /* So we won't read anything accidentally. */
2790 spec_info = 0;
2792 /* Initialize issue_rate. */
2793 if (targetm.sched.issue_rate)
2794 issue_rate = targetm.sched.issue_rate ();
2795 else
2796 issue_rate = 1;
2798 if (cached_issue_rate != issue_rate)
2800 cached_issue_rate = issue_rate;
2801 /* To invalidate max_lookahead_tries: */
2802 cached_first_cycle_multipass_dfa_lookahead = 0;
2805 old_max_uid = 0;
2806 h_i_d = 0;
2807 extend_h_i_d ();
2809 for (i = 0; i < old_max_uid; i++)
2811 h_i_d[i].cost = -1;
2812 h_i_d[i].todo_spec = HARD_DEP;
2813 h_i_d[i].queue_index = QUEUE_NOWHERE;
2814 h_i_d[i].tick = INVALID_TICK;
2815 h_i_d[i].inter_tick = INVALID_TICK;
2818 if (targetm.sched.init_dfa_pre_cycle_insn)
2819 targetm.sched.init_dfa_pre_cycle_insn ();
2821 if (targetm.sched.init_dfa_post_cycle_insn)
2822 targetm.sched.init_dfa_post_cycle_insn ();
2824 dfa_start ();
2825 dfa_state_size = state_size ();
2826 curr_state = xmalloc (dfa_state_size);
2828 h_i_d[0].luid = 0;
2829 luid = 1;
2830 FOR_EACH_BB (b)
2831 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2833 INSN_LUID (insn) = luid;
2835 /* Increment the next luid, unless this is a note. We don't
2836 really need separate IDs for notes and we don't want to
2837 schedule differently depending on whether or not there are
2838 line-number notes, i.e., depending on whether or not we're
2839 generating debugging information. */
2840 if (!NOTE_P (insn))
2841 ++luid;
2843 if (insn == BB_END (b))
2844 break;
2847 init_dependency_caches (luid);
2849 init_alias_analysis ();
2851 line_note_head = 0;
2852 old_last_basic_block = 0;
2853 glat_start = 0;
2854 glat_end = 0;
2855 extend_bb (0);
2857 if (current_sched_info->flags & USE_GLAT)
2858 init_glat ();
2860 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2861 removing death notes. */
2862 FOR_EACH_BB_REVERSE (b)
2863 find_insn_reg_weight (b);
2865 if (targetm.sched.md_init_global)
2866 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2868 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2869 before_recovery = 0;
2871 #ifdef ENABLE_CHECKING
2872 /* This is used primarily for finding bugs in check_cfg () itself. */
2873 check_cfg (0, 0);
2874 #endif
2877 /* Free global data used during insn scheduling. */
2879 void
2880 sched_finish (void)
2882 free (h_i_d);
2883 free (curr_state);
2884 dfa_finish ();
2885 free_dependency_caches ();
2886 end_alias_analysis ();
2887 free (line_note_head);
2888 free_glat ();
2890 if (targetm.sched.md_finish_global)
2891 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2893 if (spec_info && spec_info->dump)
2895 char c = reload_completed ? 'a' : 'b';
2897 fprintf (spec_info->dump,
2898 ";; %s:\n", current_function_name ());
2900 fprintf (spec_info->dump,
2901 ";; Procedure %cr-begin-data-spec motions == %d\n",
2902 c, nr_begin_data);
2903 fprintf (spec_info->dump,
2904 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2905 c, nr_be_in_data);
2906 fprintf (spec_info->dump,
2907 ";; Procedure %cr-begin-control-spec motions == %d\n",
2908 c, nr_begin_control);
2909 fprintf (spec_info->dump,
2910 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2911 c, nr_be_in_control);
2914 #ifdef ENABLE_CHECKING
2915 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2916 if (!reload_completed)
2917 check_cfg (0, 0);
2918 #endif
2920 current_sched_info = NULL;
2923 /* Fix INSN_TICKs of the instructions in the current block as well as
2924 INSN_TICKs of their dependents.
2925 HEAD and TAIL are the begin and the end of the current scheduled block. */
2926 static void
2927 fix_inter_tick (rtx head, rtx tail)
2929 /* Set of instructions with corrected INSN_TICK. */
2930 bitmap_head processed;
2931 int next_clock = clock_var + 1;
2933 bitmap_initialize (&processed, 0);
2935 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2936 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2937 across different blocks. */
2938 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2940 if (INSN_P (head))
2942 int tick;
2943 rtx link;
2945 tick = INSN_TICK (head);
2946 gcc_assert (tick >= MIN_TICK);
2948 /* Fix INSN_TICK of instruction from just scheduled block. */
2949 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2951 bitmap_set_bit (&processed, INSN_LUID (head));
2952 tick -= next_clock;
2954 if (tick < MIN_TICK)
2955 tick = MIN_TICK;
2957 INSN_TICK (head) = tick;
2960 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2962 rtx next;
2964 next = XEXP (link, 0);
2965 tick = INSN_TICK (next);
2967 if (tick != INVALID_TICK
2968 /* If NEXT has its INSN_TICK calculated, fix it.
2969 If not - it will be properly calculated from
2970 scratch later in fix_tick_ready. */
2971 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2973 bitmap_set_bit (&processed, INSN_LUID (next));
2974 tick -= next_clock;
2976 if (tick < MIN_TICK)
2977 tick = MIN_TICK;
2979 if (tick > INTER_TICK (next))
2980 INTER_TICK (next) = tick;
2981 else
2982 tick = INTER_TICK (next);
2984 INSN_TICK (next) = tick;
2989 bitmap_clear (&processed);
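/* Illustrative sketch (hypothetical): the rebasing done by fix_inter_tick
   on plain arrays -- shift each tick into the next block's time frame by
   subtracting NEXT_CLOCK, clamp at MIN_TICK, and use a visited set so
   each tick is rebased only once (cf. the PROCESSED bitmap).  */
#if 0
#include <stdbool.h>

static void
rebase_ticks (int *tick, bool *processed, int n, int next_clock, int min_tick)
{
  int i;

  for (i = 0; i < n; i++)
    if (!processed[i])
      {
        processed[i] = true;
        tick[i] -= next_clock;
        if (tick[i] < min_tick)
          tick[i] = min_tick;
      }
}
#endif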
2992 /* Check if NEXT is ready to be added to the ready or queue list.
2993 If "yes", add it to the proper list.
2994 Returns:
2995 -1 - is not ready yet,
2996 0 - added to the ready list,
2997 0 < N - queued for N cycles. */
2999 try_ready (rtx next)
3001 ds_t old_ts, *ts;
3002 rtx link;
3004 ts = &TODO_SPEC (next);
3005 old_ts = *ts;
3007 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3008 && ((old_ts & HARD_DEP)
3009 || (old_ts & SPECULATIVE)));
3011 if (!(current_sched_info->flags & DO_SPECULATION))
3013 if (!LOG_LINKS (next))
3014 *ts &= ~HARD_DEP;
3016 else
3018 *ts &= ~SPECULATIVE & ~HARD_DEP;
3020 link = LOG_LINKS (next);
3021 if (link)
3023 /* LOG_LINKS are maintained sorted.
3024 So if DEP_STATUS of the first dep is SPECULATIVE,
3025 then all other deps are speculative too. */
3026 if (DEP_STATUS (link) & SPECULATIVE)
3028 /* Now we've got NEXT with speculative deps only.
3029 1. Look at the deps to see what we have to do.
3030 2. Check if we can do 'todo'. */
3031 *ts = DEP_STATUS (link) & SPECULATIVE;
3032 while ((link = XEXP (link, 1)))
3033 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3035 if (dep_weak (*ts) < spec_info->weakness_cutoff)
3036 /* Too few points. */
3037 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3039 else
3040 *ts |= HARD_DEP;
3044 if (*ts & HARD_DEP)
3045 gcc_assert (*ts == old_ts
3046 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3047 else if (current_sched_info->new_ready)
3048 *ts = current_sched_info->new_ready (next, *ts);
3050 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3051 have its original pattern or changed (speculative) one. This is due
3052 to changing ebb in region scheduling.
3053 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3054 has speculative pattern.
3056 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3057 control-speculative NEXT could have been discarded by sched-rgn.c
3058 (the same case as when discarded by can_schedule_ready_p ()). */
3060 if ((*ts & SPECULATIVE)
3061 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3062 need to change anything. */
3063 && *ts != old_ts)
3065 int res;
3066 rtx new_pat;
3068 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3070 res = speculate_insn (next, *ts, &new_pat);
3072 switch (res)
3074 case -1:
3075 /* It would be nice to change DEP_STATUS of all dependences,
3076 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3077 so we won't reanalyze anything. */
3078 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3079 break;
3081 case 0:
3082 /* We follow the rule, that every speculative insn
3083 has non-null ORIG_PAT. */
3084 if (!ORIG_PAT (next))
3085 ORIG_PAT (next) = PATTERN (next);
3086 break;
3088 case 1:
3089 if (!ORIG_PAT (next))
3090 /* If we are going to overwrite the original pattern of insn,
3091 save it. */
3092 ORIG_PAT (next) = PATTERN (next);
3094 change_pattern (next, new_pat);
3095 break;
3097 default:
3098 gcc_unreachable ();
3102 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3103 either correct (*ts & SPECULATIVE),
3104 or we simply don't care (*ts & HARD_DEP). */
3106 gcc_assert (!ORIG_PAT (next)
3107 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3109 if (*ts & HARD_DEP)
3111 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3112 control-speculative NEXT could have been discarded by sched-rgn.c
3113 (the same case as when discarded by can_schedule_ready_p ()). */
3114 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3116 change_queue_index (next, QUEUE_NOWHERE);
3117 return -1;
3119 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3120 /* We should change the pattern of every previously speculative
3121 instruction - we determine whether NEXT was speculative by using
3122 the ORIG_PAT field. One exception: speculation checks have ORIG_PAT
3123 set too, so skip them. */
3125 change_pattern (next, ORIG_PAT (next));
3126 ORIG_PAT (next) = 0;
3129 if (sched_verbose >= 2)
3131 int s = TODO_SPEC (next);
3133 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3134 (*current_sched_info->print_insn) (next, 0));
3136 if (spec_info && spec_info->dump)
3138 if (s & BEGIN_DATA)
3139 fprintf (spec_info->dump, "; data-spec;");
3140 if (s & BEGIN_CONTROL)
3141 fprintf (spec_info->dump, "; control-spec;");
3142 if (s & BE_IN_CONTROL)
3143 fprintf (spec_info->dump, "; in-control-spec;");
3146 fprintf (sched_dump, "\n");
3149 adjust_priority (next);
3151 return fix_tick_ready (next);
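/* Illustrative sketch (hypothetical): the decision made above for an insn
   whose dependencies are all speculative -- combine the per-dep weakness
   (here simply by taking the minimum, a stand-in for ds_merge) and
   compare it against a cutoff; too weak a combined probability turns the
   insn back into a hard dep.  Assumes N_DEPS >= 1.  */
#if 0
static int
speculative_or_hard_p (const int *dep_weakness, int n_deps, int cutoff)
{
  int i, w = dep_weakness[0];

  for (i = 1; i < n_deps; i++)
    if (dep_weakness[i] < w)
      w = dep_weakness[i];

  return w >= cutoff;   /* Nonzero: keep speculative; zero: HARD_DEP.  */
}
#endif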
3154 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3155 static int
3156 fix_tick_ready (rtx next)
3158 rtx link;
3159 int tick, delay;
3161 link = RESOLVED_DEPS (next);
3163 if (link)
3165 int full_p;
3167 tick = INSN_TICK (next);
3168 /* If tick is not equal to INVALID_TICK, then update
3169 INSN_TICK of NEXT with the most recent resolved dependence
3170 cost. Otherwise, recalculate from scratch. */
3171 full_p = tick == INVALID_TICK;
3174 rtx pro;
3175 int tick1;
3177 pro = XEXP (link, 0);
3178 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3180 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3181 if (tick1 > tick)
3182 tick = tick1;
3184 while ((link = XEXP (link, 1)) && full_p);
3186 else
3187 tick = -1;
3189 INSN_TICK (next) = tick;
3191 delay = tick - clock_var;
3192 if (delay <= 0)
3193 delay = QUEUE_READY;
3195 change_queue_index (next, delay);
3197 return delay;
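/* Illustrative sketch (hypothetical): the tick computation above on plain
   arrays -- the earliest cycle at which a consumer can issue is the
   maximum over its resolved producers of (producer tick + dependence
   cost).  */
#if 0
static int
earliest_tick (const int *prod_tick, const int *dep_cost, int n_deps)
{
  int i, tick = 0;

  for (i = 0; i < n_deps; i++)
    if (prod_tick[i] + dep_cost[i] > tick)
      tick = prod_tick[i] + dep_cost[i];

  return tick;  /* delay = tick - clock_var; <= 0 means ready now.  */
}
#endif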
3200 /* Move NEXT to the proper queue list with (DELAY >= 1),
3201 or add it to the ready list (DELAY == QUEUE_READY),
3202 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3203 static void
3204 change_queue_index (rtx next, int delay)
3206 int i = QUEUE_INDEX (next);
3208 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3209 && delay != 0);
3210 gcc_assert (i != QUEUE_SCHEDULED);
3212 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3213 || (delay < 0 && delay == i))
3214 /* We have nothing to do. */
3215 return;
3217 /* Remove NEXT from wherever it is now. */
3218 if (i == QUEUE_READY)
3219 ready_remove_insn (next);
3220 else if (i >= 0)
3221 queue_remove (next);
3223 /* Add it to the proper place. */
3224 if (delay == QUEUE_READY)
3225 ready_add (readyp, next, false);
3226 else if (delay >= 1)
3227 queue_insn (next, delay);
3229 if (sched_verbose >= 2)
3231 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3232 (*current_sched_info->print_insn) (next, 0));
3234 if (delay == QUEUE_READY)
3235 fprintf (sched_dump, " into ready\n");
3236 else if (delay >= 1)
3237 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3238 else
3239 fprintf (sched_dump, " removed from ready or queue lists\n");
3243 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3244 static void
3245 resolve_dep (rtx next, rtx insn)
3247 rtx dep;
3249 INSN_DEP_COUNT (next)--;
3251 dep = remove_list_elem (insn, &LOG_LINKS (next));
3252 XEXP (dep, 1) = RESOLVED_DEPS (next);
3253 RESOLVED_DEPS (next) = dep;
3255 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3256 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3259 /* Extend H_I_D data. */
3260 static void
3261 extend_h_i_d (void)
3263 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3264 pseudos which do not cross calls. */
3265 int new_max_uid = get_max_uid() + 1;
3267 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3268 old_max_uid = new_max_uid;
3270 if (targetm.sched.h_i_d_extended)
3271 targetm.sched.h_i_d_extended ();
3274 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3275 N_NEW_INSNS is the number of additional elements to allocate. */
3276 static void
3277 extend_ready (int n_new_insns)
3279 int i;
3281 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3282 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3284 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3285 rgn_n_insns + 1, sizeof (char));
3287 rgn_n_insns += n_new_insns;
3289 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3290 rgn_n_insns + 1);
3292 for (i = rgn_n_insns; n_new_insns--; i--)
3293 choice_stack[i].state = xmalloc (dfa_state_size);
3296 /* Extend global scheduler structures (those, that live across calls to
3297 schedule_block) to include information about just emitted INSN. */
3298 static void
3299 extend_global (rtx insn)
3301 gcc_assert (INSN_P (insn));
3302 /* These structures have scheduler scope. */
3303 extend_h_i_d ();
3304 init_h_i_d (insn);
3306 extend_dependency_caches (1, 0);
3309 /* Extends global and local scheduler structures to include information
3310 about just emitted INSN. */
3311 static void
3312 extend_all (rtx insn)
3314 extend_global (insn);
3316 /* These structures have block scope. */
3317 extend_ready (1);
3319 (*current_sched_info->add_remove_insn) (insn, 0);
3322 /* Initialize h_i_d entry of the new INSN with default values.
3323 Values, that are not explicitly initialized here, hold zero. */
3324 static void
3325 init_h_i_d (rtx insn)
3327 INSN_LUID (insn) = luid++;
3328 INSN_COST (insn) = -1;
3329 TODO_SPEC (insn) = HARD_DEP;
3330 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3331 INSN_TICK (insn) = INVALID_TICK;
3332 INTER_TICK (insn) = INVALID_TICK;
3333 find_insn_reg_weight1 (insn);
3336 /* Generates recovery code for INSN. */
3337 static void
3338 generate_recovery_code (rtx insn)
3340 if (TODO_SPEC (insn) & BEGIN_SPEC)
3341 begin_speculative_block (insn);
3343 /* Here we have an insn with no dependencies on
3344 instructions other than CHECK_SPEC ones. */
3346 if (TODO_SPEC (insn) & BE_IN_SPEC)
3347 add_to_speculative_block (insn);
3350 /* Helper function.
3351 Tries to add speculative dependencies of type FS between instructions
3352 in LINK list and TWIN. */
3353 static void
3354 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3356 for (; link; link = XEXP (link, 1))
3358 ds_t ds;
3359 rtx consumer;
3361 consumer = XEXP (link, 0);
3363 ds = DEP_STATUS (link);
3365 if (/* If we want to create speculative dep. */
3366 fs
3367 /* And we can do that because this is a true dep. */
3368 && (ds & DEP_TYPES) == DEP_TRUE)
3370 gcc_assert (!(ds & BE_IN_SPEC));
3372 if (/* If this dep can be overcome with 'begin speculation'. */
3373 ds & BEGIN_SPEC)
3374 /* Then we have a choice: keep the dep 'begin speculative'
3375 or transform it into 'be in speculative'. */
3377 if (/* In try_ready we assert that if insn once became ready
3378 it can be removed from the ready (or queue) list only
3379 due to a backend decision. Hence we can't let the
3380 probability of the speculative dep decrease. */
3381 dep_weak (ds) <= dep_weak (fs))
3382 /* Transform it to be in speculative. */
3383 ds = (ds & ~BEGIN_SPEC) | fs;
3385 else
3386 /* Mark the dep as 'be in speculative'. */
3387 ds |= fs;
3390 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3394 /* Generates recovery code for BEGIN speculative INSN. */
3395 static void
3396 begin_speculative_block (rtx insn)
3398 if (TODO_SPEC (insn) & BEGIN_DATA)
3399 nr_begin_data++;
3400 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3401 nr_begin_control++;
3403 create_check_block_twin (insn, false);
3405 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3408 /* Generates recovery code for BE_IN speculative INSN. */
3409 static void
3410 add_to_speculative_block (rtx insn)
3412 ds_t ts;
3413 rtx link, twins = NULL;
3415 ts = TODO_SPEC (insn);
3416 gcc_assert (!(ts & ~BE_IN_SPEC));
3418 if (ts & BE_IN_DATA)
3419 nr_be_in_data++;
3420 if (ts & BE_IN_CONTROL)
3421 nr_be_in_control++;
3423 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3424 gcc_assert (!TODO_SPEC (insn));
3426 DONE_SPEC (insn) |= ts;
3428 /* First we convert all simple checks to branchy. */
3429 for (link = LOG_LINKS (insn); link;)
3431 rtx check;
3433 check = XEXP (link, 0);
3435 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
3437 create_check_block_twin (check, true);
3438 link = LOG_LINKS (insn);
3440 else
3441 link = XEXP (link, 1);
3444 clear_priorities (insn);
3448 rtx link, check, twin;
3449 basic_block rec;
3451 link = LOG_LINKS (insn);
3452 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3453 && (DEP_STATUS (link) & BE_IN_SPEC)
3454 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3456 check = XEXP (link, 0);
3458 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
3459 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3461 rec = BLOCK_FOR_INSN (check);
3463 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3464 extend_global (twin);
3466 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3468 if (sched_verbose && spec_info->dump)
3469 /* INSN_BB (insn) isn't determined for twin insns yet.
3470 So we can't use current_sched_info->print_insn. */
3471 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3472 INSN_UID (twin), rec->index);
3474 twins = alloc_INSN_LIST (twin, twins);
3476 /* Add dependences between TWIN and all appropriate
3477 instructions from REC. */
3480 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3484 link = XEXP (link, 1);
3485 if (link)
3487 check = XEXP (link, 0);
3488 if (BLOCK_FOR_INSN (check) == rec)
3489 break;
3491 else
3492 break;
3494 while (1);
3496 while (link);
3498 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3500 for (link = LOG_LINKS (insn); link;)
3502 check = XEXP (link, 0);
3504 if (BLOCK_FOR_INSN (check) == rec)
3506 delete_back_forw_dep (insn, check);
3507 link = LOG_LINKS (insn);
3509 else
3510 link = XEXP (link, 1);
3513 while (LOG_LINKS (insn));
3515 /* We can't add the dependence between insn and twin earlier because
3516 that would make twin appear in the INSN_DEPEND (insn). */
3517 while (twins)
3519 rtx twin;
3521 twin = XEXP (twins, 0);
3522 calc_priorities (twin);
3523 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3525 twin = XEXP (twins, 1);
3526 free_INSN_LIST_node (twins);
3527 twins = twin;
3531 /* Extends the array pointed to by P and zero-fills only the new part. */
3532 void *
3533 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3535 gcc_assert (new_nmemb >= old_nmemb);
3536 p = XRESIZEVAR (void, p, new_nmemb * size);
3537 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3538 return p;
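/* Illustrative sketch (hypothetical): growing a table with xrecalloc so
   that old entries are preserved and the new tail is zero-initialized,
   as the h_i_d and ready_try arrays are grown elsewhere in this file.
   TABLE and GROW_TABLE are made-up names.  */
#if 0
#include <stddef.h>

struct entry { int cost; int tick; };

static struct entry *table;
static size_t table_len;

static void
grow_table (size_t new_len)
{
  /* Entries [0, table_len) are kept; [table_len, new_len) are zeroed.  */
  table = xrecalloc (table, new_len, table_len, sizeof (*table));
  table_len = new_len;
}
#endif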
3541 /* Return the probability of speculation success for the speculation
3542 status DS. */
3543 static dw_t
3544 dep_weak (ds_t ds)
3546 ds_t res = 1, dt;
3547 int n = 0;
3549 dt = FIRST_SPEC_TYPE;
3552 if (ds & dt)
3554 res *= (ds_t) get_dep_weak (ds, dt);
3555 n++;
3558 if (dt == LAST_SPEC_TYPE)
3559 break;
3560 dt <<= SPEC_TYPE_SHIFT;
3562 while (1);
3564 gcc_assert (n);
3565 while (--n)
3566 res /= MAX_DEP_WEAK;
3568 if (res < MIN_DEP_WEAK)
3569 res = MIN_DEP_WEAK;
3571 gcc_assert (res <= MAX_DEP_WEAK);
3573 return (dw_t) res;
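/* Illustrative sketch (hypothetical): the fixed-point product dep_weak
   computes, with concrete numbers.  With the scale value standing for
   probability 1.0, two independent speculation weights w1 and w2 combine
   to w1 * w2 / SCALE; e.g. 512 (50%) and 768 (75%) give 384 (37.5%).  */
#if 0
#define EX_MAX_DEP_WEAK 1024    /* Hypothetical scale; 1024 == 100%.  */

static int
combine_weak (int w1, int w2)
{
  return (w1 * w2) / EX_MAX_DEP_WEAK;
}
#endif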
3576 /* Helper function.
3577 Find fallthru edge from PRED. */
3578 static edge
3579 find_fallthru_edge (basic_block pred)
3581 edge e;
3582 edge_iterator ei;
3583 basic_block succ;
3585 succ = pred->next_bb;
3586 gcc_assert (succ->prev_bb == pred);
3588 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3590 FOR_EACH_EDGE (e, ei, pred->succs)
3591 if (e->flags & EDGE_FALLTHRU)
3593 gcc_assert (e->dest == succ);
3594 return e;
3597 else
3599 FOR_EACH_EDGE (e, ei, succ->preds)
3600 if (e->flags & EDGE_FALLTHRU)
3602 gcc_assert (e->src == pred);
3603 return e;
3607 return NULL;
3610 /* Initialize BEFORE_RECOVERY variable. */
3611 static void
3612 init_before_recovery (void)
3614 basic_block last;
3615 edge e;
3617 last = EXIT_BLOCK_PTR->prev_bb;
3618 e = find_fallthru_edge (last);
3620 if (e)
3622 /* We create two basic blocks:
3623 1. Single instruction block is inserted right after E->SRC
3624 and has jump to
3625 2. Empty block right before EXIT_BLOCK.
3626 Between these two blocks recovery blocks will be emitted. */
3628 basic_block single, empty;
3629 rtx x, label;
3631 single = create_empty_bb (last);
3632 empty = create_empty_bb (single);
3634 single->count = last->count;
3635 empty->count = last->count;
3636 single->frequency = last->frequency;
3637 empty->frequency = last->frequency;
3638 BB_COPY_PARTITION (single, last);
3639 BB_COPY_PARTITION (empty, last);
3641 redirect_edge_succ (e, single);
3642 make_single_succ_edge (single, empty, 0);
3643 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3644 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3646 label = block_label (empty);
3647 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3648 JUMP_LABEL (x) = label;
3649 LABEL_NUSES (label)++;
3650 extend_global (x);
3652 emit_barrier_after (x);
3654 add_block (empty, 0);
3655 add_block (single, 0);
3657 before_recovery = single;
3659 if (sched_verbose >= 2 && spec_info->dump)
3660 fprintf (spec_info->dump,
3661 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3662 last->index, single->index, empty->index);
3664 else
3665 before_recovery = last;
3668 /* Returns new recovery block. */
3669 static basic_block
3670 create_recovery_block (void)
3672 rtx label;
3673 rtx barrier;
3674 basic_block rec;
3676 added_recovery_block_p = true;
3678 if (!before_recovery)
3679 init_before_recovery ();
3681 barrier = get_last_bb_insn (before_recovery);
3682 gcc_assert (BARRIER_P (barrier));
3684 label = emit_label_after (gen_label_rtx (), barrier);
3686 rec = create_basic_block (label, label, before_recovery);
3688 /* A recovery block always ends with an unconditional jump. */
3689 emit_barrier_after (BB_END (rec));
3691 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3692 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3694 if (sched_verbose && spec_info->dump)
3695 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3696 rec->index);
3698 before_recovery = rec;
3700 return rec;
3703 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3704 INSN is a simple check that should be converted to a branchy one. */
3705 static void
3706 create_check_block_twin (rtx insn, bool mutate_p)
3708 basic_block rec;
3709 rtx label, check, twin, link;
3710 ds_t fs;
3712 gcc_assert (ORIG_PAT (insn)
3713 && (!mutate_p
3714 || (IS_SPECULATION_SIMPLE_CHECK_P (insn)
3715 && !(TODO_SPEC (insn) & SPECULATIVE))));
3717 /* Create recovery block. */
3718 if (mutate_p || targetm.sched.needs_block_p (insn))
3720 rec = create_recovery_block ();
3721 label = BB_HEAD (rec);
3723 else
3725 rec = EXIT_BLOCK_PTR;
3726 label = 0;
3729 /* Emit CHECK. */
3730 check = targetm.sched.gen_check (insn, label, mutate_p);
3732 if (rec != EXIT_BLOCK_PTR)
3734 /* To have mem_reg alive at the beginning of second_bb,
3735 we emit the check BEFORE insn, so that after splitting,
3736 insn will be at the beginning of second_bb, which will
3737 provide us with the correct life information. */
3738 check = emit_jump_insn_before (check, insn);
3739 JUMP_LABEL (check) = label;
3740 LABEL_NUSES (label)++;
3742 else
3743 check = emit_insn_before (check, insn);
3745 /* Extend data structures. */
3746 extend_all (check);
3747 RECOVERY_BLOCK (check) = rec;
3749 if (sched_verbose && spec_info->dump)
3750 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3751 (*current_sched_info->print_insn) (check, 0));
3753 gcc_assert (ORIG_PAT (insn));
3755 /* Initialize TWIN (twin is a duplicate of original instruction
3756 in the recovery block). */
3757 if (rec != EXIT_BLOCK_PTR)
3759 rtx link;
3761 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3762 if (DEP_STATUS (link) & DEP_OUTPUT)
3764 RESOLVED_DEPS (check) =
3765 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3766 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3769 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3770 extend_global (twin);
3772 if (sched_verbose && spec_info->dump)
3773 /* INSN_BB (insn) isn't determined for twin insns yet.
3774 So we can't use current_sched_info->print_insn. */
3775 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3776 INSN_UID (twin), rec->index);
3778 else
3780 ORIG_PAT (check) = ORIG_PAT (insn);
3781 HAS_INTERNAL_DEP (check) = 1;
3782 twin = check;
3783 /* ??? We probably should change all OUTPUT dependencies to
3784 (TRUE | OUTPUT). */
3787 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3789 if (rec != EXIT_BLOCK_PTR)
3790 /* In case of branchy check, fix CFG. */
3792 basic_block first_bb, second_bb;
3793 rtx jump;
3794 edge e;
3795 int edge_flags;
3797 first_bb = BLOCK_FOR_INSN (check);
3798 e = split_block (first_bb, check);
3799 /* split_block emits a note if *check == BB_END. Probably it
3800 is better to rip that note off. */
3801 gcc_assert (e->src == first_bb);
3802 second_bb = e->dest;
3804 /* This is fixing of incoming edge. */
3805 /* ??? Which other flags should be specified? */
3806 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3807 /* Partition type is the same, if it is "unpartitioned". */
3808 edge_flags = EDGE_CROSSING;
3809 else
3810 edge_flags = 0;
3812 e = make_edge (first_bb, rec, edge_flags);
3814 add_block (second_bb, first_bb);
3816 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3817 label = block_label (second_bb);
3818 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3819 JUMP_LABEL (jump) = label;
3820 LABEL_NUSES (label)++;
3821 extend_global (jump);
3823 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3824 /* Partition type is the same, if it is "unpartitioned". */
3826 /* Rewritten from cfgrtl.c. */
3827 if (flag_reorder_blocks_and_partition
3828 && targetm.have_named_sections
3829 /*&& !any_condjump_p (jump)*/)
3830 /* any_condjump_p (jump) == false.
3831 We don't need the same note for the check because
3832 any_condjump_p (check) == true. */
3834 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3835 NULL_RTX,
3836 REG_NOTES (jump));
3838 edge_flags = EDGE_CROSSING;
3840 else
3841 edge_flags = 0;
3843 make_single_succ_edge (rec, second_bb, edge_flags);
3845 add_block (rec, EXIT_BLOCK_PTR);
3848 /* Move backward dependences from INSN to CHECK and
3849 move forward dependences from INSN to TWIN. */
3850 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3852 ds_t ds;
3854 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3855 check --TRUE--> producer ??? or ANTI ???
3856 twin --TRUE--> producer
3857 twin --ANTI--> check
3859 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3860 check --ANTI--> producer
3861 twin --ANTI--> producer
3862 twin --ANTI--> check
3864 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3865 check ~~TRUE~~> producer
3866 twin ~~TRUE~~> producer
3867 twin --ANTI--> check */
3869 ds = DEP_STATUS (link);
3871 if (ds & BEGIN_SPEC)
3873 gcc_assert (!mutate_p);
3874 ds &= ~BEGIN_SPEC;
3877 if (rec != EXIT_BLOCK_PTR)
3879 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3880 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3882 else
3883 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3886 for (link = LOG_LINKS (insn); link;)
3887 if ((DEP_STATUS (link) & BEGIN_SPEC)
3888 || mutate_p)
3889 /* We can delete this dep only if we totally overcome it with
3890 BEGIN_SPECULATION. */
3892 delete_back_forw_dep (insn, XEXP (link, 0));
3893 link = LOG_LINKS (insn);
3895 else
3896 link = XEXP (link, 1);
3898 fs = 0;
3900 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3901 here. */
3903 gcc_assert (!DONE_SPEC (insn));
3905 if (!mutate_p)
3907 ds_t ts = TODO_SPEC (insn);
3909 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3910 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3912 if (ts & BEGIN_DATA)
3913 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3914 if (ts & BEGIN_CONTROL)
3915 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3917 else
3918 CHECK_SPEC (check) = CHECK_SPEC (insn);
3920 /* Future speculations: call the helper. */
3921 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3923 if (rec != EXIT_BLOCK_PTR)
3925 /* Which types of dependencies to use here is, generally,
3926 a machine-dependent question... But, for now,
3927 it is not. */
3929 if (!mutate_p)
3931 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3932 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3934 else
3936 if (spec_info->dump)
3937 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3938 (*current_sched_info->print_insn) (insn, 0));
3940 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3941 delete_back_forw_dep (XEXP (link, 0), insn);
3943 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3944 try_ready (check);
3946 sched_remove_insn (insn);
3949 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3951 else
3952 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3954 if (!mutate_p)
3955 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3956 because it'll be done later in add_to_speculative_block. */
3958 clear_priorities (twin);
3959 calc_priorities (twin);
3963 /* Removes dependencies between instructions in the recovery block REC
3964 and ordinary region instructions. It keeps inner dependencies so
3965 they won't need to be recomputed. */
3966 static void
3967 fix_recovery_deps (basic_block rec)
3969 rtx note, insn, link, jump, ready_list = 0;
3970 bitmap_head in_ready;
3972 bitmap_initialize (&in_ready, 0);
3974 /* NOTE - a basic block note. */
3975 note = NEXT_INSN (BB_HEAD (rec));
3976 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3977 insn = BB_END (rec);
3978 gcc_assert (JUMP_P (insn));
3979 insn = PREV_INSN (insn);
3983 for (link = INSN_DEPEND (insn); link;)
3985 rtx consumer;
3987 consumer = XEXP (link, 0);
3989 if (BLOCK_FOR_INSN (consumer) != rec)
3991 delete_back_forw_dep (consumer, insn);
3993 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3995 ready_list = alloc_INSN_LIST (consumer, ready_list);
3996 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3999 link = INSN_DEPEND (insn);
4001 else
4003 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4005 link = XEXP (link, 1);
4009 insn = PREV_INSN (insn);
4011 while (insn != note);
4013 bitmap_clear (&in_ready);
4015 /* Try to add instructions to the ready or queue list. */
4016 for (link = ready_list; link; link = XEXP (link, 1))
4017 try_ready (XEXP (link, 0));
4018 free_INSN_LIST_list (&ready_list);
4020 /* Fixing jump's dependences. */
4021 insn = BB_HEAD (rec);
4022 jump = BB_END (rec);
4024 gcc_assert (LABEL_P (insn));
4025 insn = NEXT_INSN (insn);
4027 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4028 add_jump_dependencies (insn, jump);
4031 /* The function saves line notes at the beginning of block B. */
4032 static void
4033 associate_line_notes_with_blocks (basic_block b)
4035 rtx line;
4037 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4038 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4040 line_note_head[b->index] = line;
4041 break;
4043 /* Do a forward search as well, since we won't get to see the first
4044 notes in a basic block. */
4045 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4047 if (INSN_P (line))
4048 break;
4049 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4050 line_note_head[b->index] = line;
4054 /* Change the pattern of INSN to NEW_PAT. */
4055 static void
4056 change_pattern (rtx insn, rtx new_pat)
4058 int t;
4060 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4061 gcc_assert (t);
4062 /* Invalidate INSN_COST, so it'll be recalculated. */
4063 INSN_COST (insn) = -1;
4064 /* Invalidate INSN_TICK, so it'll be recalculated. */
4065 INSN_TICK (insn) = INVALID_TICK;
4066 dfa_clear_single_insn_cache (insn);
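/* Note on the gcc_assert above: validate_change with IN_GROUP == 0
   applies the change immediately and returns nonzero only if the new
   pattern is recognizable, so callers of change_pattern must pass
   patterns that are already known to match an insn definition.  */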
4070 /* Return -1 if INSN can't be speculated;
4071 0 if, for speculation with REQUEST mode, it is OK to use the
4072 current instruction pattern;
4073 1 if the pattern needs to be changed: *NEW_PAT then holds the speculative pattern. */
4074 static int
4075 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4077 gcc_assert (current_sched_info->flags & DO_SPECULATION
4078 && (request & SPECULATIVE));
4080 if (!NONJUMP_INSN_P (insn)
4081 || HAS_INTERNAL_DEP (insn)
4082 || SCHED_GROUP_P (insn)
4083 || side_effects_p (PATTERN (insn))
4084 || (request & spec_info->mask) != request)
4085 return -1;
4087 gcc_assert (!IS_SPECULATION_CHECK_P (insn));
4089 if (request & BE_IN_SPEC)
4091 if (may_trap_p (PATTERN (insn)))
4092 return -1;
4094 if (!(request & BEGIN_SPEC))
4095 return 0;
4098 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
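/* A minimal usage sketch (illustrative only, not a quote of an actual
   caller): dispatch on the three-valued result, assuming TS holds the
   speculation request for INSN.

       rtx new_pat;
       int res = speculate_insn (insn, ts & SPECULATIVE, &new_pat);

       if (res == 1)
         change_pattern (insn, new_pat);
       else if (res == -1)
         treat INSN as non-speculative;

   and with res == 0 the current pattern of INSN is used as is.  */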
4101 /* Print some information about block BB, which starts with HEAD and
4102 ends with TAIL, before scheduling it.
4103 I is zero if the scheduler is about to start with a fresh ebb. */
4104 static void
4105 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4107 if (!i)
4108 fprintf (sched_dump,
4109 ";; ======================================================\n");
4110 else
4111 fprintf (sched_dump,
4112 ";; =====================ADVANCING TO=====================\n");
4113 fprintf (sched_dump,
4114 ";; -- basic block %d from %d to %d -- %s reload\n",
4115 bb->index, INSN_UID (head), INSN_UID (tail),
4116 (reload_completed ? "after" : "before"));
4117 fprintf (sched_dump,
4118 ";; ======================================================\n");
4119 fprintf (sched_dump, "\n");
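/* For example, before reload the output looks like this (the block
   number and insn UIDs are illustrative):

   ;; ======================================================
   ;; -- basic block 3 from 45 to 62 -- before reload
   ;; ======================================================
*/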
4122 /* Unlink basic block notes and labels and save them, so they
4123 can easily be restored.  We unlink basic block notes in the EBB to
4124 provide backward compatibility with the previous code, as target
4125 backends assume that there'll be only instructions between
4126 current_sched_info->{head and tail}.  We restore these notes as soon
4127 as we can.
4128 FIRST (LAST) is the first (last) basic block in the EBB.
4129 NB: In the usual case (FIRST == LAST) nothing is really done. */
4130 void
4131 unlink_bb_notes (basic_block first, basic_block last)
4133 /* We DON'T unlink basic block notes of the first block in the ebb. */
4134 if (first == last)
4135 return;
4137 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4139 /* Make a sentinel. */
4140 if (last->next_bb != EXIT_BLOCK_PTR)
4141 bb_header[last->next_bb->index] = 0;
4143 first = first->next_bb;
4144 do
4146 rtx prev, label, note, next;
4148 label = BB_HEAD (last);
4149 if (LABEL_P (label))
4150 note = NEXT_INSN (label);
4151 else
4152 note = label;
4153 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4155 prev = PREV_INSN (label);
4156 next = NEXT_INSN (note);
4157 gcc_assert (prev && next);
4159 NEXT_INSN (prev) = next;
4160 PREV_INSN (next) = prev;
4162 bb_header[last->index] = label;
4164 if (last == first)
4165 break;
4167 last = last->prev_bb;
4169 while (1);
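/* Schematically, for a label L followed by its basic block note N,
   the splice above turns

     prev -> L -> N -> next    into    prev -> next

   while bb_header[] keeps a pointer to L (and, via NEXT_INSN, to N),
   so restore_bb_notes can later re-link exactly the removed pair.  */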
4172 /* Restore basic block notes.
4173 FIRST is the first basic block in the ebb. */
4174 static void
4175 restore_bb_notes (basic_block first)
4177 if (!bb_header)
4178 return;
4180 /* We DON'T unlink basic block notes of the first block in the ebb. */
4181 first = first->next_bb;
4182 /* Remember: FIRST is actually the second basic block in the EBB. */
4184 while (first != EXIT_BLOCK_PTR
4185 && bb_header[first->index])
4187 rtx prev, label, note, next;
4189 label = bb_header[first->index];
4190 prev = PREV_INSN (label);
4191 next = NEXT_INSN (prev);
4193 if (LABEL_P (label))
4194 note = NEXT_INSN (label);
4195 else
4196 note = label;
4197 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4199 bb_header[first->index] = 0;
4201 NEXT_INSN (prev) = label;
4202 NEXT_INSN (note) = next;
4203 PREV_INSN (next) = note;
4205 first = first->next_bb;
4208 free (bb_header);
4209 bb_header = 0;
4212 /* Extend per basic block data structures of the scheduler.
4213 If BB is NULL, initialize structures for the whole CFG.
4214 Otherwise, initialize them for the just created BB. */
4215 static void
4216 extend_bb (basic_block bb)
4218 rtx insn;
4220 if (write_symbols != NO_DEBUG)
4222 /* Save-line-note-head:
4223 Determine the line-number at the start of each basic block.
4224 This must be computed and saved now, because after a basic block's
4225 predecessor has been scheduled, it is impossible to accurately
4226 determine the correct line number for the first insn of the block. */
4227 line_note_head = xrecalloc (line_note_head, last_basic_block,
4228 old_last_basic_block,
4229 sizeof (*line_note_head));
4231 if (bb)
4232 associate_line_notes_with_blocks (bb);
4233 else
4234 FOR_EACH_BB (bb)
4235 associate_line_notes_with_blocks (bb);
4238 old_last_basic_block = last_basic_block;
4240 if (current_sched_info->flags & USE_GLAT)
4242 glat_start = xrealloc (glat_start,
4243 last_basic_block * sizeof (*glat_start));
4244 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4247 /* The following is done to keep current_sched_info->next_tail non-null. */
4249 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4250 if (NEXT_INSN (insn) == 0
4251 || (!NOTE_P (insn)
4252 && !LABEL_P (insn)
4253 /* Don't emit a NOTE if it would end up before a BARRIER. */
4254 && !BARRIER_P (NEXT_INSN (insn))))
4256 emit_note_after (NOTE_INSN_DELETED, insn);
4257 /* Make insn appear outside BB. */
4258 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4262 /* Add a basic block BB to extended basic block EBB.
4263 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4264 If EBB is NULL, then BB should be a new region. */
4265 void
4266 add_block (basic_block bb, basic_block ebb)
4268 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4269 && bb->il.rtl->global_live_at_start == 0
4270 && bb->il.rtl->global_live_at_end == 0);
4272 extend_bb (bb);
4274 glat_start[bb->index] = 0;
4275 glat_end[bb->index] = 0;
4277 if (current_sched_info->add_block)
4278 /* This changes only data structures of the front-end. */
4279 current_sched_info->add_block (bb, ebb);
4282 /* Helper function.
4283 Fix CFG after both in- and inter-block movement of
4284 control_flow_insn_p JUMP. */
4285 static void
4286 fix_jump_move (rtx jump)
4288 basic_block bb, jump_bb, jump_bb_next;
4290 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4291 jump_bb = BLOCK_FOR_INSN (jump);
4292 jump_bb_next = jump_bb->next_bb;
4294 gcc_assert (current_sched_info->flags & SCHED_EBB
4295 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4297 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4298 /* If jump_bb_next is not empty. */
4299 BB_END (jump_bb) = BB_END (jump_bb_next);
4301 if (BB_END (bb) != PREV_INSN (jump))
4302 /* Then there are instructions after jump that should be moved
4303 to jump_bb_next. */
4304 BB_END (jump_bb_next) = BB_END (bb);
4305 else
4306 /* Otherwise jump_bb_next is empty. */
4307 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4309 /* To make the assertion in move_insn happy. */
4310 BB_END (bb) = PREV_INSN (jump);
4312 update_bb_for_insn (jump_bb_next);
4315 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4316 static void
4317 move_block_after_check (rtx jump)
4319 basic_block bb, jump_bb, jump_bb_next;
4320 VEC(edge,gc) *t;
4322 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4323 jump_bb = BLOCK_FOR_INSN (jump);
4324 jump_bb_next = jump_bb->next_bb;
4326 update_bb_for_insn (jump_bb);
4328 gcc_assert (IS_SPECULATION_CHECK_P (jump)
4329 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
4331 unlink_block (jump_bb_next);
4332 link_block (jump_bb_next, bb);
4334 t = bb->succs;
4335 bb->succs = 0;
4336 move_succs (&(jump_bb->succs), bb);
4337 move_succs (&(jump_bb_next->succs), jump_bb);
4338 move_succs (&t, jump_bb_next);
4340 if (current_sched_info->fix_recovery_cfg)
4341 current_sched_info->fix_recovery_cfg
4342 (bb->index, jump_bb->index, jump_bb_next->index);
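/* The three move_succs calls above rotate the successor vectors
   cyclically: BB receives JUMP_BB's successors, JUMP_BB receives
   JUMP_BB_NEXT's, and JUMP_BB_NEXT receives BB's original ones
   (saved in T).  Schematically:

     before:  bb -> s0,  jump_bb -> s1,  jump_bb_next -> s2
     after:   bb -> s1,  jump_bb -> s2,  jump_bb_next -> s0  */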
4345 /* Helper function for move_block_after_check.
4346 This function attaches the edge vector pointed to by SUCCSP to
4347 block TO. */
4348 static void
4349 move_succs (VEC(edge,gc) **succsp, basic_block to)
4351 edge e;
4352 edge_iterator ei;
4354 gcc_assert (to->succs == 0);
4356 to->succs = *succsp;
4358 FOR_EACH_EDGE (e, ei, to->succs)
4359 e->src = to;
4361 *succsp = 0;
4364 /* Initialize GLAT (global_live_at_{start, end}) structures.
4365 GLAT structures are used as substitutes for the global_live_at_{start, end}
4366 regsets during scheduling.  This is necessary in order to use functions
4367 such as split_block (), which assume consistency of register live information. */
4368 static void
4369 init_glat (void)
4371 basic_block bb;
4373 FOR_ALL_BB (bb)
4374 init_glat1 (bb);
4377 /* Helper function for init_glat. */
4378 static void
4379 init_glat1 (basic_block bb)
4381 gcc_assert (bb->il.rtl->global_live_at_start != 0
4382 && bb->il.rtl->global_live_at_end != 0);
4384 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4385 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4387 if (current_sched_info->flags & DETACH_LIFE_INFO)
4389 bb->il.rtl->global_live_at_start = 0;
4390 bb->il.rtl->global_live_at_end = 0;
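/* The resulting invariant, for a block with index I (a sketch):

     glat_start[I] == registers live at the start of the block
     glat_end[I]   == registers live at the end of the block

   and, when DETACH_LIFE_INFO is set, bb->il.rtl->global_live_at_start
   and ->global_live_at_end stay NULL until attach_life_info restores
   them.  */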
4394 /* Attach reg_live_info back to basic blocks.
4395 Also save regsets that should not have been changed during scheduling,
4396 for checking purposes (see check_reg_live). */
4397 void
4398 attach_life_info (void)
4400 basic_block bb;
4402 FOR_ALL_BB (bb)
4403 attach_life_info1 (bb);
4406 /* Helper function for attach_life_info. */
4407 static void
4408 attach_life_info1 (basic_block bb)
4410 gcc_assert (bb->il.rtl->global_live_at_start == 0
4411 && bb->il.rtl->global_live_at_end == 0);
4413 if (glat_start[bb->index])
4415 gcc_assert (glat_end[bb->index]);
4417 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4418 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4420 /* Make them NULL, so they won't be freed in free_glat. */
4421 glat_start[bb->index] = 0;
4422 glat_end[bb->index] = 0;
4424 #ifdef ENABLE_CHECKING
4425 if (bb->index < NUM_FIXED_BLOCKS
4426 || current_sched_info->region_head_or_leaf_p (bb, 0))
4428 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4429 COPY_REG_SET (glat_start[bb->index],
4430 bb->il.rtl->global_live_at_start);
4433 if (bb->index < NUM_FIXED_BLOCKS
4434 || current_sched_info->region_head_or_leaf_p (bb, 1))
4436 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4437 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4439 #endif
4441 else
4443 gcc_assert (!glat_end[bb->index]);
4445 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4446 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4450 /* Free GLAT information. */
4451 static void
4452 free_glat (void)
4454 #ifdef ENABLE_CHECKING
4455 if (current_sched_info->flags & DETACH_LIFE_INFO)
4457 basic_block bb;
4459 FOR_ALL_BB (bb)
4461 if (glat_start[bb->index])
4462 FREE_REG_SET (glat_start[bb->index]);
4463 if (glat_end[bb->index])
4464 FREE_REG_SET (glat_end[bb->index]);
4467 #endif
4469 free (glat_start);
4470 free (glat_end);
4473 /* Remove INSN from the instruction stream.
4474 INSN should not have any dependencies. */
4475 static void
4476 sched_remove_insn (rtx insn)
4478 change_queue_index (insn, QUEUE_NOWHERE);
4479 current_sched_info->add_remove_insn (insn, 1);
4480 remove_insn (insn);
4483 /* Clear priorities of all instructions that are
4484 forward dependent on INSN. */
4485 static void
4486 clear_priorities (rtx insn)
4488 rtx link;
4490 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4492 rtx pro;
4494 pro = XEXP (link, 0);
4495 if (INSN_PRIORITY_KNOWN (pro))
4497 INSN_PRIORITY_KNOWN (pro) = 0;
4498 clear_priorities (pro);
4503 /* Recompute priorities of instructions whose priorities might have been
4504 changed due to changes in INSN. */
4505 static void
4506 calc_priorities (rtx insn)
4508 rtx link;
4510 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4512 rtx pro;
4514 pro = XEXP (link, 0);
4515 if (!INSN_PRIORITY_KNOWN (pro))
4517 priority (pro);
4518 calc_priorities (pro);
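/* clear_priorities and calc_priorities are used as a pair (see their
   use above, right after a twin insn is created): the first walk over
   LOG_LINKS invalidates INSN_PRIORITY_KNOWN transitively for all
   producers of INSN, and the second recomputes each priority via
   priority () in the same order.  */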
4524 /* Add dependences between JUMP and other instructions in the recovery
4525 block.  INSN is the first insn in the recovery block. */
4526 static void
4527 add_jump_dependencies (rtx insn, rtx jump)
4529 do
4531 insn = NEXT_INSN (insn);
4532 if (insn == jump)
4533 break;
4535 if (!INSN_DEPEND (insn))
4536 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4538 while (1);
4539 gcc_assert (LOG_LINKS (jump));
4542 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4543 rtx
4544 bb_note (basic_block bb)
4546 rtx note;
4548 note = BB_HEAD (bb);
4549 if (LABEL_P (note))
4550 note = NEXT_INSN (note);
4552 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4553 return note;
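/* Typical use (an illustrative sketch): start a walk over the real
   insns of a block just past its leading label/note pair, e.g.

     for (insn = NEXT_INSN (bb_note (bb));
          insn != NEXT_INSN (BB_END (bb));
          insn = NEXT_INSN (insn))
       ...;
*/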
4556 #ifdef ENABLE_CHECKING
4557 extern void debug_spec_status (ds_t);
4559 /* Dump information about the dependence status S. */
4560 void
4561 debug_spec_status (ds_t s)
4563 FILE *f = stderr;
4565 if (s & BEGIN_DATA)
4566 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4567 if (s & BE_IN_DATA)
4568 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4569 if (s & BEGIN_CONTROL)
4570 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4571 if (s & BE_IN_CONTROL)
4572 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4574 if (s & HARD_DEP)
4575 fprintf (f, "HARD_DEP; ");
4577 if (s & DEP_TRUE)
4578 fprintf (f, "DEP_TRUE; ");
4579 if (s & DEP_ANTI)
4580 fprintf (f, "DEP_ANTI; ");
4581 if (s & DEP_OUTPUT)
4582 fprintf (f, "DEP_OUTPUT; ");
4584 fprintf (f, "\n");
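/* Meant to be called from the debugger, e.g.

     (gdb) call debug_spec_status (s)

   for some ds_t value S; for a weak data speculation with a pending
   true dependence this would print something like
   "BEGIN_DATA: 3; DEP_TRUE; ".  */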
4587 /* Helper function for check_cfg.
4588 Return nonzero if the edge vector pointed to by EL has an edge with
4589 TYPE in its flags. */
4590 static int
4591 has_edge_p (VEC(edge,gc) *el, int type)
4593 edge e;
4594 edge_iterator ei;
4596 FOR_EACH_EDGE (e, ei, el)
4597 if (e->flags & type)
4598 return 1;
4599 return 0;
4602 /* Check a few properties of the CFG between HEAD and TAIL.
4603 If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
4604 instruction stream. */
4605 static void
4606 check_cfg (rtx head, rtx tail)
4608 rtx next_tail;
4609 basic_block bb = 0;
4610 int not_first = 0, not_last;
4612 if (head == NULL)
4613 head = get_insns ();
4614 if (tail == NULL)
4615 tail = get_last_insn ();
4616 next_tail = NEXT_INSN (tail);
4618 do
4620 not_last = head != tail;
4622 if (not_first)
4623 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4624 if (not_last)
4625 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4627 if (LABEL_P (head)
4628 || (NOTE_INSN_BASIC_BLOCK_P (head)
4629 && (!not_first
4630 || (not_first && !LABEL_P (PREV_INSN (head))))))
4632 gcc_assert (bb == 0);
4633 bb = BLOCK_FOR_INSN (head);
4634 if (bb != 0)
4635 gcc_assert (BB_HEAD (bb) == head);
4636 else
4637 /* This is the case of a jump table.  See inside_basic_block_p (). */
4638 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4641 if (bb == 0)
4643 gcc_assert (!inside_basic_block_p (head));
4644 head = NEXT_INSN (head);
4646 else
4648 gcc_assert (inside_basic_block_p (head)
4649 || NOTE_P (head));
4650 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4652 if (LABEL_P (head))
4654 head = NEXT_INSN (head);
4655 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4657 else
4659 if (control_flow_insn_p (head))
4661 gcc_assert (BB_END (bb) == head);
4663 if (any_uncondjump_p (head))
4664 gcc_assert (EDGE_COUNT (bb->succs) == 1
4665 && BARRIER_P (NEXT_INSN (head)));
4666 else if (any_condjump_p (head))
4667 gcc_assert (/* Usual case. */
4668 (EDGE_COUNT (bb->succs) > 1
4669 && !BARRIER_P (NEXT_INSN (head)))
4670 /* Or jump to the next instruction. */
4671 || (EDGE_COUNT (bb->succs) == 1
4672 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4673 == JUMP_LABEL (head))));
4675 if (BB_END (bb) == head)
4677 if (EDGE_COUNT (bb->succs) > 1)
4678 gcc_assert (control_flow_insn_p (head)
4679 || has_edge_p (bb->succs, EDGE_COMPLEX));
4680 bb = 0;
4683 head = NEXT_INSN (head);
4687 not_first = 1;
4689 while (head != next_tail);
4691 gcc_assert (bb == 0);
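/* In short, the walk above verifies that the insn chain is consistently
   doubly linked, that every insn between a block's head and end is
   attributed to that block, and that each control-flow insn ends its
   block with the expected successor-edge/BARRIER shape.  */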
4694 /* Perform a few consistency checks of flags in different data structures. */
4695 static void
4696 check_sched_flags (void)
4698 unsigned int f = current_sched_info->flags;
4700 if (flag_sched_stalled_insns)
4701 gcc_assert (!(f & DO_SPECULATION));
4702 if (f & DO_SPECULATION)
4703 gcc_assert (!flag_sched_stalled_insns
4704 && (f & DETACH_LIFE_INFO)
4705 && spec_info
4706 && spec_info->mask);
4707 if (f & DETACH_LIFE_INFO)
4708 gcc_assert (f & USE_GLAT);
4711 /* Check global_live_at_{start, end} regsets.
4712 If FATAL_P is TRUE, then abort execution at the first failure.
4713 Otherwise, print diagnostics to STDERR (this mode is for calling
4714 from the debugger). */
4715 void
4716 check_reg_live (bool fatal_p)
4718 basic_block bb;
4720 FOR_ALL_BB (bb)
4722 int i;
4724 i = bb->index;
4726 if (glat_start[i])
4728 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4729 glat_start[i]);
4731 if (!b)
4733 gcc_assert (!fatal_p);
4735 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4739 if (glat_end[i])
4741 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4742 glat_end[i]);
4744 if (!b)
4746 gcc_assert (!fatal_p);
4748 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4753 #endif /* ENABLE_CHECKING */
4755 #endif /* INSN_SCHEDULING */