/* Instruction scheduling pass.
   Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BLOCK_HEAD,
   BLOCK_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks that have been scheduled.  */
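
/* To make the description above concrete, here is an illustrative
   sketch of one clock tick of the list-scheduling loop.  It is not
   part of the pass (note the #if 0): the names used (dequeue_expired,
   sort_ready, commit, and so on) are hypothetical and stand in for
   the real queue and ready-list handling defined later in this file.  */
#if 0
  while (n_scheduled < n_insns)
    {
      clock++;

      /* (Q->R): insns whose delay has expired become ready.  */
      while ((insn = dequeue_expired (insn_queue, clock)) != 0)
        ready[n_ready++] = insn;

      /* Sort the ready list by the tie-breaking rules 1-5 above and
         commit the best insn to the schedule (R->S).  */
      sort_ready (ready, n_ready);
      commit (ready[--n_ready], clock);
      n_scheduled++;

      /* Committing an insn satisfies dependencies, moving its
         predecessors from P to R or Q elsewhere in the loop.  */
    }
#endif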
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file ATTRIBUTE_UNUSED;
{
}
#else /* INSN_SCHEDULING -- rest of file */

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;
/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;
static regset reg_pending_sets;
static int reg_pending_sets_all;
/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R)				\
  ((((U) << UNIT_BITS) << BLOCKAGE_BITS			\
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS		\
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
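
/* For example (illustrative only), with HOST_BITS_PER_INT == 32, a
   range R packs the minimum cost in the upper half and the maximum
   cost in the lower half: R == (2 << 16) | 5 gives
   MIN_BLOCKAGE_COST (R) == 2 and MAX_BLOCKAGE_COST (R) == 5.  */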
#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;
/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */
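
/* In summary, an insn flows through the structures like this (an
   illustrative diagram, not part of the original description):

     P --(deps satisfied, delay needed)---> Q --(time passes)---> R
     P --(deps satisfied, already ready)------------------------> R
     R --(function unit conflict)---------> Q
     R --(best ready insn chosen)---------> S                       */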
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
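
/* For example (illustrative numbers): if INSN_QUEUE_SIZE is 16, then
   NEXT_Q (15) wraps around to 0 and NEXT_Q_AFTER (14, 5) is
   (14 + 5) & 15 == 3.  The masking is correct only because
   INSN_QUEUE_SIZE is a power of two.  */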
/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Data structure for keeping track of register information
   during that register's life.  */

struct sometimes
{
  int regno;
  int live_length;
  int calls_crossed;
};
/* Forward declarations.  */
static void add_dependence PROTO((rtx, rtx, enum reg_note));
static void remove_dependence PROTO((rtx, rtx));
static rtx find_insn_list PROTO((rtx, rtx));
static int insn_unit PROTO((rtx));
static unsigned int blockage_range PROTO((int, rtx));
static void clear_units PROTO((void));
static void prepare_unit PROTO((int));
static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
static void schedule_unit PROTO((int, rtx, int));
static int actual_hazard PROTO((int, rtx, int, int));
static int potential_hazard PROTO((int, rtx, int));
static int insn_cost PROTO((rtx, rtx, rtx));
static int priority PROTO((rtx));
static void free_pending_lists PROTO((void));
static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
static void flush_pending_lists PROTO((rtx, int));
static void sched_analyze_1 PROTO((rtx, rtx));
static void sched_analyze_2 PROTO((rtx, rtx));
static void sched_analyze_insn PROTO((rtx, rtx, rtx));
static int sched_analyze PROTO((rtx, rtx));
static void sched_note_set PROTO((rtx, int));
static int rank_for_schedule PROTO((const GENERIC_PTR, const GENERIC_PTR));
static void swap_sort PROTO((rtx *, int));
static void queue_insn PROTO((rtx, int));
static int birthing_insn_p PROTO((rtx));
static void adjust_priority PROTO((rtx));
static int schedule_insn PROTO((rtx, rtx *, int, int));
static int schedule_select PROTO((rtx *, int, int, FILE *));
static void create_reg_dead_note PROTO((rtx, rtx));
static void attach_deaths PROTO((rtx, rtx, int));
static void attach_deaths_insn PROTO((rtx));
static rtx unlink_notes PROTO((rtx, rtx));
static int new_sometimes_live PROTO((struct sometimes *, int, int));
static void finish_sometimes_live PROTO((struct sometimes *, int));
static rtx reemit_notes PROTO((rtx, rtx));
static void schedule_block PROTO((int, FILE *));
static void split_hard_reg_notes PROTO((rtx, rtx, rtx));
static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
static void update_n_sets PROTO((rtx, int));

/* Main entry point of this file.  */
void schedule_insns PROTO((FILE *));

#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
             && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          RTX_INTEGRATED_P (link) = 1;
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
      else
        prev = link;
    }

  if (! found)
    abort ();
  return;
}

#ifndef __GNUC__
#define __inline
#endif
/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
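
/* For example (illustrative only; `a' is one memory location):

     load a  ... load a   -- (0) read dependence
     store a ... load a   -- (1) true dependence
     load a  ... store a  -- (2) anti dependence
     store a ... store a  -- (3) output dependence  */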
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0) unit++;
        }

      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
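
/* For example (illustrative only): an insn that needs only unit 3 is
   encoded as 3, while an insn that needs units 1 and 4 is encoded as
   ~((1 << 1) | (1 << 4)) == ~0x12 == -19.  The decoding loops below
   (see prepare_unit, schedule_unit, and the hazard functions) recover
   the unit indices by walking the bits of the complemented value.  */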
/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if ((int) UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}
/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}
/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}
/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}
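
/* A small worked example (illustrative only): if unit_tick[instance]
   is 12, CLOCK is 9 and the incoming COST is 2, then tick - clock == 3
   exceeds the cost so far; for a unit with no blockage range function
   the hazard cost returned is simply 3.  */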
/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
         one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
        {
          if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
            break;
          instance += FUNCTION_UNITS_SIZE;
        }
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        schedule_unit (i, insn, clock);
}
/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
                                                   clock, cost);
#if MAX_MULTIPLICITY > 1
      int this_cost;

      if (best_cost > cost)
        {
          for (i = function_units[unit].multiplicity - 1; i > 0; i--)
            {
              instance += FUNCTION_UNITS_SIZE;
              this_cost = actual_hazard_this_instance (unit, instance, insn,
                                                       clock, cost);
              if (this_cost < best_cost)
                {
                  best_cost = this_cost;
                  if (this_cost <= cost)
                    break;
                }
            }
        }
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = actual_hazard (i, insn, clock, cost);

  return cost;
}
/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
        {
          if (function_units[unit].blockage_range_function)
            {
              maxb = minb = blockage_range (unit, insn);
              maxb = MAX_BLOCKAGE_COST (maxb);
              minb = MIN_BLOCKAGE_COST (minb);
            }

          if (maxb > 1)
            {
              /* Make the number of instructions left dominate.  Make the
                 minimum delay dominate the maximum delay.  If all these
                 are the same, use the unit number to add an arbitrary
                 ordering.  Other terms can be added.  */
              ncost = minb * 0x40 + maxb;
              ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
              if (ncost > cost)
                cost = ncost;
            }
        }
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = potential_hazard (i, insn, cost);

  return cost;
}
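
/* To illustrate the weighting (hypothetical numbers): for unit 3 with
   minb == 2, maxb == 5 and four insns left to use the unit, ncost is
   (2 * 0x40 + 5) * (3 * 0x1000 + 3), so the insns-remaining term
   dominates the delay terms, which in turn dominate the unit number.  */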
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to result_ready_cost because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        {
          INSN_COST (insn) = 1;
          return 1;
        }
      else
        {
          cost = result_ready_cost (insn);

          if (cost < 1)
            cost = 1;

          INSN_COST (insn) = cost;
        }
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}
/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* If this was a duplicate of a dependence we already deleted,
             ignore it.  */
          if (RTX_INTEGRATED_P (prev))
            continue;

          /* A dependence pointing to a note or deleted insn is always
             obsolete, because sched_analyze_insn will have created any
             necessary new dependences which replace it.  Notes and deleted
             insns can be created when instructions are deleted by insn
             splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

          prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
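
/* A worked example of the recursion above (illustrative only):
   consider a chain A -> B -> C where B's LOG_LINKS contain A, C's
   contain B, insn_cost (A) == 2 and insn_cost (B) == 1.  Then
   priority (A) == 1 (no links), priority (B) == 1 + 2 - 1 == 2, and
   priority (C) == 2 + 1 - 1 == 2.  With all costs equal to 1 every
   priority collapses to 1, matching the "no scheduling will be done"
   remark in the comment above.  */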
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (insn, only_write)
     rtx insn;
     int only_write;
{
  rtx link;

  while (pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}
/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno + i);
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          SET_REGNO_REG_SET (reg_pending_sets, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (! reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn, 0);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */
        prev = prev_nonnote_insn (insn);

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  Remove an explicit dependence on this insn
           as SCHED_GROUP_P now represents it.  */

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx_INSN_LIST (VOIDmode,
                                       insn, reg_last_uses[regno + i]);
                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);
                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx_INSN_LIST (VOIDmode, insn, reg_last_uses[regno]);
            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (! reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */

        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                   x, rtx_varies_p))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i])
                  add_dependence (insn, reg_last_sets[i], 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (XEXP (x, 0), insn);
      sched_analyze_1 (x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn, loop_notes)
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (XVECEXP (x, 0, i), insn);
          else
            sched_analyze_2 (XVECEXP (x, 0, i), insn);
        }
    }
  else
    sched_analyze_2 (x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (XEXP (link, 0), insn);
        else
          sched_analyze_2 (XEXP (link, 0), insn);
      }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      rtx link;

      for (i = 0; i < max_reg; i++)
        {
          rtx u;
          for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[i] = 0;
          if (reg_last_sets[i])
            add_dependence (insn, reg_last_sets[i], 0);
        }
      reg_pending_sets_all = 1;

      flush_pending_lists (insn, 0);

      link = loop_notes;
      while (XEXP (link, 1))
        link = XEXP (link, 1);
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
                             {
                               reg_last_sets[i] = insn;
                             });
  CLEAR_REG_SET (reg_pending_sets);

  if (reg_pending_sets_all)
    {
      for (i = 0; i < maxreg; i++)
        reg_last_sets[i] = insn;
      reg_pending_sets_all = 0;
    }

  /* Handle function calls and function returns created by the epilogue
     threading code.  */
  if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
         accompanying USE insns by depending them one on another in order.

         Also, we must do the same thing for returns created by the epilogue
         threading code.  Note this code works only in this special case,
         because other passes make no guarantee that they will never emit
         an instruction between a USE and a RETURN.  There is such a guarantee
         for USE instructions immediately before a call.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
             && GET_CODE (PATTERN (dep_insn)) == USE
             && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
        {
          SCHED_GROUP_P (prev_dep_insn) = 1;

          /* Make a copy of all dependencies on dep_insn, and add to insn.
             This is so that all of the dependencies will apply to the
             group.  */

          for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
            add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

          prev_dep_insn = dep_insn;
          dep_insn = PREV_INSN (dep_insn);
        }
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;
  rtx loop_notes = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          sched_analyze_insn (PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
          n_insns += 1;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              int max_reg = max_reg_num ();
              for (i = 0; i < max_reg; i++)
                {
                  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  reg_last_uses[i] = 0;
                  if (reg_last_sets[i])
                    add_dependence (insn, reg_last_sets[i], 0);
                }
              reg_pending_sets_all = 1;

              /* Add a pair of fake REG_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */

              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
                                                    GEN_INT (0),
                                                    REG_NOTES (insn));
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
                                                    GEN_INT (NOTE_INSN_SETJMP),
                                                    REG_NOTES (insn));
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                  {
                    for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                    reg_last_uses[i] = 0;
                    if (reg_last_sets[i])
                      add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                  }
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          LOG_LINKS (sched_before_next_call) = 0;

          sched_analyze_insn (PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */
          last_function_call = insn;
          n_insns += 1;
        }

      /* See comments on reemit_notes as to why we do this.  */
      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
        {
          loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
                                          GEN_INT (NOTE_BLOCK_NUMBER (insn)),
                                          loop_notes);
          loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
                                          GEN_INT (NOTE_LINE_NUMBER (insn)),
                                          loop_notes);
          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
        }

      if (insn == tail)
        return n_insns;
    }

  abort ();
}
/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then we
   are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (x, death)
     rtx x;
     int death;
{
  register int regno;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
         value or just a byte field of a register exactly the same way that
         mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
         does not kill the entire register.  */
      if (GET_CODE (reg) != SUBREG
          || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
        subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
    {
      if (death)
        {
          /* If we only set part of the register, then this set does not
             kill it.  */
          if (subreg_p)
            return;

          /* Try killing this register.  */
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
              while (--j >= 0)
                {
                  CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
                  SET_REGNO_REG_SET (bb_dead_regs, regno + j);
                }
            }
          else
            {
              CLEAR_REGNO_REG_SET (bb_live_regs, regno);
              SET_REGNO_REG_SET (bb_dead_regs, regno);
            }
        }
      else
        {
          /* Make the register live again.  */
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
              while (--j >= 0)
                {
                  SET_REGNO_REG_SET (bb_live_regs, regno + j);
                  CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
                }
            }
          else
            {
              SET_REGNO_REG_SET (bb_live_regs, regno);
              CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
            }
        }
    }
}
1821 /* Macros and functions for keeping the priority queue sorted, and
1822 dealing with queueing and dequeueing of instructions. */
1824 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1825 do { if ((NEW_READY) - (OLD_READY) == 1) \
1826 swap_sort (READY, NEW_READY); \
1827 else if ((NEW_READY) - (OLD_READY) > 1) \
1828 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1829 while (0)
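/* Usage sketch: SCHED_SORT assumes READY was in rank order except for
   the (NEW_READY - OLD_READY) insns appended at the end.  Appending a
   single insn costs one insertion via swap_sort; appending more than
   one falls back to a full qsort with rank_for_schedule, as in the
   call SCHED_SORT (ready, new_ready, n_ready) in the main scheduling
   loop below.  */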
1831 /* Returns a positive value if y is preferred; returns a negative value if
1832 x is preferred. Should never return 0, since that will make the sort
1833 unstable. */
1835 static int
1836 rank_for_schedule (x, y)
1837 const GENERIC_PTR x;
1838 const GENERIC_PTR y;
1840 rtx tmp = *(rtx *)y;
1841 rtx tmp2 = *(rtx *)x;
1842 rtx link;
1843 int tmp_class, tmp2_class;
1844 int value;
1846 /* Choose the instruction with the highest priority, if different. */
1847 if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
1848 return value;
1850 if (last_scheduled_insn)
1852 /* Classify the instructions into three classes:
1853 1) Data dependent on last scheduled insn.
1854 2) Anti/Output dependent on last scheduled insn.
1855 3) Independent of last scheduled insn, or has latency of one.
1856 Choose the insn from the highest numbered class if different. */
1857 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1858 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
1859 tmp_class = 3;
1860 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1861 tmp_class = 1;
1862 else
1863 tmp_class = 2;
1865 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1866 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
1867 tmp2_class = 3;
1868 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1869 tmp2_class = 1;
1870 else
1871 tmp2_class = 2;
1873 if ((value = tmp_class - tmp2_class))
1874 return value;
1877 /* If insns are equally good, sort by INSN_LUID (original insn order),
1878 so that we make the sort stable. This minimizes instruction movement,
1879 thus minimizing sched's effect on debugging and cross-jumping. */
1880 return INSN_LUID (tmp) - INSN_LUID (tmp2);
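/* For illustration: if TMP and TMP2 have equal INSN_PRIORITY and
   neither has a LOG_LINKS entry on the last scheduled insn, both land
   in class 3, so the comparison falls through to INSN_LUID and the
   insns keep their original order in the rtl stream.  */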
1883 /* Resort the array A in which only the element at index N may be out of order. */
1885 __inline static void
1886 swap_sort (a, n)
1887 rtx *a;
1888 int n;
1890 rtx insn = a[n-1];
1891 int i = n-2;
1893 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1895 a[i+1] = a[i];
1896 i -= 1;
1898 a[i+1] = insn;
1901 static int max_priority;
1903 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1904 before the currently executing insn. */
1906 __inline static void
1907 queue_insn (insn, n_cycles)
1908 rtx insn;
1909 int n_cycles;
1911 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1912 NEXT_INSN (insn) = insn_queue[next_q];
1913 insn_queue[next_q] = insn;
1914 q_size += 1;
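/* The queue is a circular array of INSN_QUEUE_SIZE insn lists chained
   through NEXT_INSN.  Roughly, queue_insn (insn, 3) files INSN in
   slot NEXT_Q_AFTER (q_ptr, 3), so it is dumped back onto the ready
   list only after the main loop has advanced Q_PTR three more
   times.  */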
1917 /* Return nonzero if PAT is the pattern of an insn which makes a
1918 register live. */
1920 __inline static int
1921 birthing_insn_p (pat)
1922 rtx pat;
1924 int j;
1926 if (reload_completed == 1)
1927 return 0;
1929 if (GET_CODE (pat) == SET
1930 && GET_CODE (SET_DEST (pat)) == REG)
1932 rtx dest = SET_DEST (pat);
1933 int i = REGNO (dest);
1935 /* It would be more accurate to use refers_to_regno_p or
1936 reg_mentioned_p to determine when the dest is not live before this
1937 insn. */
1939 if (REGNO_REG_SET_P (bb_live_regs, i))
1940 return (REG_N_SETS (i) == 1);
1942 return 0;
1944 if (GET_CODE (pat) == PARALLEL)
1946 for (j = 0; j < XVECLEN (pat, 0); j++)
1947 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1948 return 1;
1950 return 0;
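/* A minimal illustration, assuming a pre-reload block: for the pattern
   (set (reg:SI 100) (reg:SI 101)), if pseudo 100 is in BB_LIVE_REGS at
   this point of the backwards scan and REG_N_SETS (100) == 1, this set
   is the pseudo's only birth and the insn is "birthing".  */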
1953 /* PREV is an insn that is ready to execute. Adjust its priority if that
1954 will help shorten register lifetimes. */
1956 __inline static void
1957 adjust_priority (prev)
1958 rtx prev;
1960 /* Trying to shorten register lives after reload has completed
1961 is useless and wrong. It gives inaccurate schedules. */
1962 if (reload_completed == 0)
1964 rtx note;
1965 int n_deaths = 0;
1967 /* ??? This code has no effect, because REG_DEAD notes are removed
1968 before we ever get here. */
1969 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1970 if (REG_NOTE_KIND (note) == REG_DEAD)
1971 n_deaths += 1;
1973 /* Defer scheduling insns which kill registers, since that
1974 shortens register lives. Prefer scheduling insns which
1975 make registers live for the same reason. */
1976 switch (n_deaths)
1978 default:
1979 INSN_PRIORITY (prev) >>= 3;
1980 break;
1981 case 3:
1982 INSN_PRIORITY (prev) >>= 2;
1983 break;
1984 case 2:
1985 case 1:
1986 INSN_PRIORITY (prev) >>= 1;
1987 break;
1988 case 0:
1989 if (birthing_insn_p (PATTERN (prev)))
1991 int max = max_priority;
1993 if (max > INSN_PRIORITY (prev))
1994 INSN_PRIORITY (prev) = max;
1996 break;
1998 #ifdef ADJUST_PRIORITY
1999 ADJUST_PRIORITY (prev);
2000 #endif
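/* Worked example of the adjustment above: an insn of priority 8 that
   carries one or two REG_DEAD notes drops to 4, with three notes to 2,
   and with four or more to 1; an insn that births a register is
   instead raised to MAX_PRIORITY, the best priority currently in
   play, so it tends to issue first.  (As the ??? comment notes, the
   REG_DEAD half is currently dead code.)  */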
2004 /* INSN is the "currently executing insn". Launch each insn which was
2005 waiting on INSN (in the backwards dataflow sense). READY is a
2006 vector of insns which are ready to fire. N_READY is the number of
2007 elements in READY. CLOCK is the current virtual cycle. */
2009 static int
2010 schedule_insn (insn, ready, n_ready, clock)
2011 rtx insn;
2012 rtx *ready;
2013 int n_ready;
2014 int clock;
2016 rtx link;
2017 int new_ready = n_ready;
2019 if (MAX_BLOCKAGE > 1)
2020 schedule_unit (insn_unit (insn), insn, clock);
2022 if (LOG_LINKS (insn) == 0)
2023 return n_ready;
2025 /* This is used by the function adjust_priority above. */
2026 if (n_ready > 0)
2027 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2028 else
2029 max_priority = INSN_PRIORITY (insn);
2031 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2033 rtx prev = XEXP (link, 0);
2034 int cost = insn_cost (prev, link, insn);
2036 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2038 /* We satisfied one requirement to fire PREV. Record the earliest
2039 time when PREV can fire. No need to do this if the cost is 1,
2040 because PREV can fire no sooner than the next cycle. */
2041 if (cost > 1)
2042 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2044 else
2046 /* We satisfied the last requirement to fire PREV. Ensure that all
2047 timing requirements are satisfied. */
2048 if (INSN_TICK (prev) - clock > cost)
2049 cost = INSN_TICK (prev) - clock;
2051 /* Adjust the priority of PREV and either put it on the ready
2052 list or queue it. */
2053 adjust_priority (prev);
2054 if (cost <= 1)
2055 ready[new_ready++] = prev;
2056 else
2057 queue_insn (prev, cost);
2061 return new_ready;
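/* Timing sketch: list scheduling here runs backwards, so CLOCK counts
   cycles from the end of the block.  If PREV feeds INSN over a
   latency-3 LOG_LINK and INSN issues at clock C, then once PREV's
   last remaining dependence is satisfied it is queued with
   queue_insn (prev, 3) and becomes ready no earlier than clock C+3;
   a latency-1 link lets PREV go straight onto the ready list.  */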
2064 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2065 those that are blocked due to function unit hazards and rearrange
2066 the remaining ones to minimize subsequent function unit hazards. */
2068 static int
2069 schedule_select (ready, n_ready, clock, file)
2070 rtx *ready;
2071 int n_ready, clock;
2072 FILE *file;
2074 int pri = INSN_PRIORITY (ready[0]);
2075 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2076 rtx insn;
2078 /* Work down the ready list in groups of instructions with the same
2079 priority value. Queue insns in the group that are blocked and
2080 select among those that remain for the one with the largest
2081 potential hazard. */
2082 for (i = 0; i < n_ready; i = j)
2084 int opri = pri;
2085 for (j = i + 1; j < n_ready; j++)
2086 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2087 break;
2089 /* Queue insns in the group that are blocked. */
2090 for (k = i, q = 0; k < j; k++)
2092 insn = ready[k];
2093 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2095 q++;
2096 ready[k] = 0;
2097 queue_insn (insn, cost);
2098 if (file)
2099 fprintf (file, "\n;; blocking insn %d for %d cycles",
2100 INSN_UID (insn), cost);
2103 new_ready -= q;
2105 /* Check the next group if all insns were queued. */
2106 if (j - i - q == 0)
2107 continue;
2109 /* If more than one remains, select the first one with the largest
2110 potential hazard. */
2111 else if (j - i - q > 1)
2113 best_cost = -1;
2114 for (k = i; k < j; k++)
2116 if ((insn = ready[k]) == 0)
2117 continue;
2118 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2119 > best_cost)
2121 best_cost = cost;
2122 best_insn = k;
2126 /* We have found a suitable insn to schedule. */
2127 break;
2130 /* Move the best insn to the front of the ready list. */
2131 if (best_insn != 0)
2133 if (file)
2135 fprintf (file, ", now");
2136 for (i = 0; i < n_ready; i++)
2137 if (ready[i])
2138 fprintf (file, " %d", INSN_UID (ready[i]));
2139 fprintf (file, "\n;; insn %d has a greater potential hazard",
2140 INSN_UID (ready[best_insn]));
2142 for (i = best_insn; i > 0; i--)
2144 insn = ready[i-1];
2145 ready[i-1] = ready[i];
2146 ready[i] = insn;
2150 /* Compact the ready list. */
2151 if (new_ready < n_ready)
2152 for (i = j = 0; i < n_ready; i++)
2153 if (ready[i])
2154 ready[j++] = ready[i];
2156 return new_ready;
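/* Roughly: with ready = {A, B, C} all at one priority, any insn whose
   function unit is still busy (actual_hazard != 0) is queued until
   the unit frees up, and of those remaining the one with the largest
   potential_hazard value is rotated to ready[0] so it issues first.  */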
2159 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2160 dead_notes list. */
2162 static void
2163 create_reg_dead_note (reg, insn)
2164 rtx reg, insn;
2166 rtx link;
2168 /* The number of registers killed after scheduling must be the same as the
2169 number of registers killed before scheduling. The number of REG_DEAD
2170 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2171 might become one DImode hard register REG_DEAD note, but the number of
2172 registers killed will be conserved.
2174 We carefully remove REG_DEAD notes from the dead_notes list, so that
2175 there will be none left at the end. If we run out early, then there
2176 is a bug somewhere in flow, combine and/or sched. */
2178 if (dead_notes == 0)
2180 #if 1
2181 abort ();
2182 #else
2183 link = rtx_alloc (EXPR_LIST);
2184 PUT_REG_NOTE_KIND (link, REG_DEAD);
2185 #endif
2187 else
2189 /* Number of regs killed by REG. */
2190 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2191 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2192 /* Number of regs killed by REG_DEAD notes taken off the list. */
2193 int reg_note_regs;
2195 link = dead_notes;
2196 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2197 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2198 GET_MODE (XEXP (link, 0))));
2199 while (reg_note_regs < regs_killed)
2201 /* LINK might be zero if we killed more registers after scheduling
2202 than before, and the last hard register we kill is actually
2203 multiple hard regs. */
2204 if (link == NULL_RTX)
2205 abort ();
2207 link = XEXP (link, 1);
2208 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2209 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2210 GET_MODE (XEXP (link, 0))));
2212 dead_notes = XEXP (link, 1);
2214 /* If we took off too many reg kills, put the extra ones back. */
2215 while (reg_note_regs > regs_killed)
2217 rtx temp_reg, temp_link;
2219 temp_reg = gen_rtx_REG (word_mode, 0);
2220 temp_link = rtx_alloc (EXPR_LIST);
2221 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2222 XEXP (temp_link, 0) = temp_reg;
2223 XEXP (temp_link, 1) = dead_notes;
2224 dead_notes = temp_link;
2225 reg_note_regs--;
2229 XEXP (link, 0) = reg;
2230 XEXP (link, 1) = REG_NOTES (insn);
2231 REG_NOTES (insn) = link;
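/* Counting example: killing a DImode hard register pair may consume
   two SImode REG_DEAD notes from dead_notes; conversely, taking a
   DImode note off the list to kill a single SImode reg overshoots by
   one, and the loop above pushes a dummy (reg:word_mode 0) note back
   so the running total of regs killed stays balanced.  */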
2234 /* Subroutine of attach_deaths_insn--handles the recursive search
2235 through INSN. If SET_P is true, then X is being modified by the insn. */
2237 static void
2238 attach_deaths (x, insn, set_p)
2239 rtx x;
2240 rtx insn;
2241 int set_p;
2243 register int i;
2244 register int j;
2245 register enum rtx_code code;
2246 register char *fmt;
2248 if (x == 0)
2249 return;
2251 code = GET_CODE (x);
2253 switch (code)
2255 case CONST_INT:
2256 case CONST_DOUBLE:
2257 case LABEL_REF:
2258 case SYMBOL_REF:
2259 case CONST:
2260 case CODE_LABEL:
2261 case PC:
2262 case CC0:
2263 /* Get rid of the easy cases first. */
2264 return;
2266 case REG:
2268 /* If the register dies in this insn, queue that note, and mark
2269 this register as needing to die. */
2270 /* This code is very similar to mark_used_1 (if set_p is false)
2271 and mark_set_1 (if set_p is true) in flow.c. */
2273 register int regno;
2274 int some_needed;
2275 int all_needed;
2277 if (set_p)
2278 return;
2280 regno = REGNO (x);
2281 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
2282 if (regno < FIRST_PSEUDO_REGISTER)
2284 int n;
2286 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2287 while (--n > 0)
2289 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
2290 some_needed |= needed;
2291 all_needed &= needed;
2295 /* If it wasn't live before we started, then add a REG_DEAD note.
2296 We must check the previous lifetime info not the current info,
2297 because we may have to execute this code several times, e.g.
2298 once for a clobber (which doesn't add a note) and later
2299 for a use (which does add a note).
2301 Always make the register live. We must do this even if it was
2302 live before, because this may be an insn which sets and uses
2303 the same register, in which case the register has already been
2304 killed, so we must make it live again.
2306 Global registers are always live, and should never have a REG_DEAD
2307 note added for them, so none of the code below applies to them. */
2309 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2311 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2312 STACK_POINTER_REGNUM, since these are always considered to be
2313 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2314 if (regno != FRAME_POINTER_REGNUM
2315 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2316 && ! (regno == HARD_FRAME_POINTER_REGNUM)
2317 #endif
2318 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2319 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2320 #endif
2321 && regno != STACK_POINTER_REGNUM)
2323 if (! all_needed && ! dead_or_set_p (insn, x))
2325 /* Check for the case where the register dying partially
2326 overlaps the register set by this insn. */
2327 if (regno < FIRST_PSEUDO_REGISTER
2328 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
2330 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2331 while (--n >= 0)
2332 some_needed |= dead_or_set_regno_p (insn, regno + n);
2335 /* If none of the words in X is needed, make a REG_DEAD
2336 note. Otherwise, we must make partial REG_DEAD
2337 notes. */
2338 if (! some_needed)
2339 create_reg_dead_note (x, insn);
2340 else
2342 int i;
2344 /* Don't make a REG_DEAD note for a part of a
2345 register that is set in the insn. */
2346 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2347 i >= 0; i--)
2348 if (! REGNO_REG_SET_P (old_live_regs, regno + i)
2349 && ! dead_or_set_regno_p (insn, regno + i))
2350 create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
2351 regno + i),
2352 insn);
2357 if (regno < FIRST_PSEUDO_REGISTER)
2359 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2360 while (--j >= 0)
2362 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
2363 SET_REGNO_REG_SET (bb_live_regs, regno + j);
2366 else
2368 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
2369 SET_REGNO_REG_SET (bb_live_regs, regno);
2372 return;
2375 case MEM:
2376 /* Handle tail-recursive case. */
2377 attach_deaths (XEXP (x, 0), insn, 0);
2378 return;
2380 case SUBREG:
2381 attach_deaths (SUBREG_REG (x), insn,
2382 set_p && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2383 <= UNITS_PER_WORD)
2384 || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2385 == GET_MODE_SIZE (GET_MODE ((x))))));
2386 return;
2388 case STRICT_LOW_PART:
2389 attach_deaths (XEXP (x, 0), insn, 0);
2390 return;
2392 case ZERO_EXTRACT:
2393 case SIGN_EXTRACT:
2394 attach_deaths (XEXP (x, 0), insn, 0);
2395 attach_deaths (XEXP (x, 1), insn, 0);
2396 attach_deaths (XEXP (x, 2), insn, 0);
2397 return;
2399 default:
2400 /* Other cases: walk the insn. */
2401 fmt = GET_RTX_FORMAT (code);
2402 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2404 if (fmt[i] == 'e')
2405 attach_deaths (XEXP (x, i), insn, 0);
2406 else if (fmt[i] == 'E')
2407 for (j = 0; j < XVECLEN (x, i); j++)
2408 attach_deaths (XVECEXP (x, i, j), insn, 0);
2413 /* After INSN has executed, add register death notes for each register
2414 that is dead after INSN. */
2416 static void
2417 attach_deaths_insn (insn)
2418 rtx insn;
2420 rtx x = PATTERN (insn);
2421 register RTX_CODE code = GET_CODE (x);
2422 rtx link;
2424 if (code == SET)
2426 attach_deaths (SET_SRC (x), insn, 0);
2428 /* A register might die here even if it is the destination, e.g.
2429 it is the target of a volatile read and is otherwise unused.
2430 Hence we must always call attach_deaths for the SET_DEST. */
2431 attach_deaths (SET_DEST (x), insn, 1);
2433 else if (code == PARALLEL)
2435 register int i;
2436 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2438 code = GET_CODE (XVECEXP (x, 0, i));
2439 if (code == SET)
2441 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2443 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2445 /* Flow does not add REG_DEAD notes to registers that die in
2446 clobbers, so we can't either. */
2447 else if (code != CLOBBER)
2448 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2451 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
2452 MEM being clobbered, just like flow. */
2453 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
2454 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
2455 /* Otherwise don't add a death note to things being clobbered. */
2456 else if (code != CLOBBER)
2457 attach_deaths (x, insn, 0);
2459 /* Make death notes for things used in the called function. */
2460 if (GET_CODE (insn) == CALL_INSN)
2461 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2462 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
2463 GET_CODE (XEXP (link, 0)) == CLOBBER);
2466 /* Delete notes beginning with INSN and maybe put them in the chain
2467 of notes ended by NOTE_LIST.
2468 Returns the insn following the notes. */
2470 static rtx
2471 unlink_notes (insn, tail)
2472 rtx insn, tail;
2474 rtx prev = PREV_INSN (insn);
2476 while (insn != tail && GET_CODE (insn) == NOTE)
2478 rtx next = NEXT_INSN (insn);
2479 /* Delete the note from its current position. */
2480 if (prev)
2481 NEXT_INSN (prev) = next;
2482 if (next)
2483 PREV_INSN (next) = prev;
2485 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2486 /* Record line-number notes so they can be reused. */
2487 LINE_NOTE (insn) = insn;
2489 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
2490 immediately after the call they follow. We use a fake
2491 (REG_DEAD (const_int -1)) note to remember them.
2492 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG,END} and NOTE_INSN_RANGE_{START,END}. */
2493 else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
2494 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
2495 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
2496 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
2497 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
2498 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
2499 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
2501 /* Insert the note at the end of the notes list. */
2502 PREV_INSN (insn) = note_list;
2503 if (note_list)
2504 NEXT_INSN (note_list) = insn;
2505 note_list = insn;
2508 insn = next;
2510 return insn;
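/* The NOTE_LIST built here is itself threaded through PREV_INSN and
   NEXT_INSN like an insn chain; once a block has been reordered,
   schedule_block walks back to the head of this chain and splices the
   saved notes in front of the scheduled insns.  */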
2513 /* Constructor for `sometimes' data structure. */
2515 static int
2516 new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
2517 struct sometimes *regs_sometimes_live;
2518 int regno;
2519 int sometimes_max;
2521 register struct sometimes *p;
2523 /* There should never be a register greater than max_regno here. If there
2524 is, it means that a define_split has created a new pseudo reg. This
2525 is not allowed, since there will not be flow info available for any
2526 new register, so catch the error here. */
2527 if (regno >= max_regno)
2528 abort ();
2530 p = &regs_sometimes_live[sometimes_max];
2531 p->regno = regno;
2532 p->live_length = 0;
2533 p->calls_crossed = 0;
2534 sometimes_max++;
2535 return sometimes_max;
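/* Sketch of the protocol used in schedule_block: each register found
   live gets an entry here; its live_length is bumped once per insn
   while it remains live and calls_crossed once per CALL_INSN it is
   live across; finish_sometimes_live then flushes the totals into
   sched_reg_live_length and sched_reg_n_calls_crossed.  */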
2538 /* Count lengths of all regs we are currently tracking,
2539 and find new registers no longer live. */
2541 static void
2542 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2543 struct sometimes *regs_sometimes_live;
2544 int sometimes_max;
2546 int i;
2548 for (i = 0; i < sometimes_max; i++)
2550 register struct sometimes *p = &regs_sometimes_live[i];
2551 int regno = p->regno;
2553 sched_reg_live_length[regno] += p->live_length;
2554 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2558 /* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
2559 NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
2560 NOTEs. The REG_DEAD note following the first one contains the saved
2561 value of NOTE_BLOCK_NUMBER, which is useful for
2562 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
2563 output by the instruction scheduler. Return the new value of LAST. */
2565 static rtx
2566 reemit_notes (insn, last)
2567 rtx insn;
2568 rtx last;
2570 rtx note;
2572 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2574 if (REG_NOTE_KIND (note) == REG_DEAD
2575 && GET_CODE (XEXP (note, 0)) == CONST_INT)
2577 if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
2579 CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
2580 = CONST_CALL_P (note);
2581 remove_note (insn, note);
2582 note = XEXP (note, 1);
2584 else
2586 last = emit_note_before (INTVAL (XEXP (note, 0)), last);
2587 remove_note (insn, note);
2588 note = XEXP (note, 1);
2589 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
2591 remove_note (insn, note);
2594 return last;
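/* Example of the encoding being undone: a NOTE_INSN_EH_REGION_BEG for
   block 3 travels through scheduling as the note pair

       (expr_list:REG_DEAD (const_int NOTE_INSN_EH_REGION_BEG)
          (expr_list:REG_DEAD (const_int 3) ...))

   on a neighboring insn; the first value is re-emitted as the note's
   NOTE_LINE_NUMBER and the second restores its NOTE_BLOCK_NUMBER.  */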
2597 /* Use modified list scheduling to rearrange insns in basic block
2598 B. FILE, if nonzero, is where we dump interesting output about
2599 this pass. */
2601 static void
2602 schedule_block (b, file)
2603 int b;
2604 FILE *file;
2606 rtx insn, last;
2607 rtx *ready, link;
2608 int i, j, n_ready = 0, new_ready, n_insns;
2609 int sched_n_insns = 0;
2610 int clock;
2611 #define NEED_NOTHING 0
2612 #define NEED_HEAD 1
2613 #define NEED_TAIL 2
2614 int new_needs;
2616 /* HEAD and TAIL delimit the region being scheduled. */
2617 rtx head = BLOCK_HEAD (b);
2618 rtx tail = BLOCK_END (b);
2619 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2620 being scheduled. When the insns have been ordered,
2621 these insns delimit where the new insns are to be
2622 spliced back into the insn chain. */
2623 rtx next_tail;
2624 rtx prev_head;
2626 /* Keep life information accurate. */
2627 register struct sometimes *regs_sometimes_live;
2628 int sometimes_max;
2630 if (file)
2631 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2632 b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
2634 i = max_reg_num ();
2635 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2636 bzero ((char *) reg_last_uses, i * sizeof (rtx));
2637 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2638 bzero ((char *) reg_last_sets, i * sizeof (rtx));
2639 reg_pending_sets = ALLOCA_REG_SET ();
2640 CLEAR_REG_SET (reg_pending_sets);
2641 reg_pending_sets_all = 0;
2642 clear_units ();
2644 #if 0
2645 /* We used to have code to avoid getting parameters moved from hard
2646 argument registers into pseudos.
2648 However, it was removed when it proved to be of marginal benefit and
2649 caused problems because of different notions of what the "head" insn
2650 was. */
2652 /* Remove certain insns at the beginning from scheduling,
2653 by advancing HEAD. */
2655 /* At the start of a function, before reload has run, don't delay getting
2656 parameters from hard registers into pseudo registers. */
2657 if (reload_completed == 0 && b == 0)
2659 while (head != tail
2660 && GET_CODE (head) == NOTE
2661 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2662 head = NEXT_INSN (head);
2663 while (head != tail
2664 && GET_CODE (head) == INSN
2665 && GET_CODE (PATTERN (head)) == SET)
2667 rtx src = SET_SRC (PATTERN (head));
2668 while (GET_CODE (src) == SUBREG
2669 || GET_CODE (src) == SIGN_EXTEND
2670 || GET_CODE (src) == ZERO_EXTEND
2671 || GET_CODE (src) == SIGN_EXTRACT
2672 || GET_CODE (src) == ZERO_EXTRACT)
2673 src = XEXP (src, 0);
2674 if (GET_CODE (src) != REG
2675 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2676 break;
2677 /* Keep this insn from ever being scheduled. */
2678 INSN_REF_COUNT (head) = 1;
2679 head = NEXT_INSN (head);
2682 #endif
2684 /* Don't include any notes or labels at the beginning of the
2685 basic block, or notes at the ends of basic blocks. */
2686 while (head != tail)
2688 if (GET_CODE (head) == NOTE)
2689 head = NEXT_INSN (head);
2690 else if (GET_CODE (tail) == NOTE)
2691 tail = PREV_INSN (tail);
2692 else if (GET_CODE (head) == CODE_LABEL)
2693 head = NEXT_INSN (head);
2694 else break;
2696 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2697 to schedule this block. */
2698 if (head == tail
2699 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2700 goto ret;
2702 #if 0
2703 /* This short-cut doesn't work. It does not count call insns crossed by
2704 registers in reg_sometimes_live. It does not mark these registers as
2705 dead if they die in this block. It does not mark these registers live
2706 (or create new reg_sometimes_live entries if necessary) if they are born
2707 in this block.
2709 The easy solution is to just always schedule a block. This block only
2710 has one insn, so this won't slow down this pass by much. */
2712 if (head == tail)
2713 goto ret;
2714 #endif
2716 /* Now HEAD through TAIL are the insns actually to be rearranged;
2717 Let PREV_HEAD and NEXT_TAIL enclose them. */
2718 prev_head = PREV_INSN (head);
2719 next_tail = NEXT_INSN (tail);
2721 /* Initialize basic block data structures. */
2722 dead_notes = 0;
2723 pending_read_insns = 0;
2724 pending_read_mems = 0;
2725 pending_write_insns = 0;
2726 pending_write_mems = 0;
2727 pending_lists_length = 0;
2728 last_pending_memory_flush = 0;
2729 last_function_call = 0;
2730 last_scheduled_insn = 0;
2732 LOG_LINKS (sched_before_next_call) = 0;
2734 n_insns = sched_analyze (head, tail);
2735 if (n_insns == 0)
2737 free_pending_lists ();
2738 goto ret;
2741 /* Allocate vector to hold insns to be rearranged (except those
2742 insns which are controlled by an insn with SCHED_GROUP_P set).
2743 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2744 as those variables ultimately are set up. */
2745 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2747 /* TAIL is now the last of the insns to be rearranged.
2748 Put those insns into the READY vector. */
2749 insn = tail;
2751 /* For all branches, calls, uses, and cc0 setters, force them to remain
2752 in order at the end of the block by adding dependencies and giving
2753 the last a high priority. There may be notes present, and prev_head
2754 may also be a note.
2756 Branches must obviously remain at the end. Calls should remain at the
2757 end since moving them results in worse register allocation. Uses remain
2758 at the end to ensure proper register allocation. cc0 setters remain
2759 at the end because they can't be moved away from their cc0 user. */
2760 last = 0;
2761 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2762 || (GET_CODE (insn) == INSN
2763 && (GET_CODE (PATTERN (insn)) == USE
2764 #ifdef HAVE_cc0
2765 || sets_cc0_p (PATTERN (insn))
2766 #endif
2768 || GET_CODE (insn) == NOTE)
2770 if (GET_CODE (insn) != NOTE)
2772 priority (insn);
2773 if (last == 0)
2775 ready[n_ready++] = insn;
2776 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2777 INSN_REF_COUNT (insn) = 0;
2779 else if (! find_insn_list (insn, LOG_LINKS (last)))
2781 add_dependence (last, insn, REG_DEP_ANTI);
2782 INSN_REF_COUNT (insn)++;
2784 last = insn;
2786 /* Skip over insns that are part of a group. */
2787 while (SCHED_GROUP_P (insn))
2789 insn = prev_nonnote_insn (insn);
2790 priority (insn);
2794 insn = PREV_INSN (insn);
2795 /* Don't overrun the bounds of the basic block. */
2796 if (insn == prev_head)
2797 break;
2800 /* Assign priorities to instructions. Also check whether they
2801 are in priority order already. If so then I will be nonnegative.
2802 We use this shortcut only before reloading. */
2803 #if 0
2804 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2805 #endif
2807 for (; insn != prev_head; insn = PREV_INSN (insn))
2809 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2811 priority (insn);
2812 if (INSN_REF_COUNT (insn) == 0)
2814 if (last == 0)
2815 ready[n_ready++] = insn;
2816 else
2818 /* Make this dependent on the last of the instructions
2819 that must remain in order at the end of the block. */
2820 add_dependence (last, insn, REG_DEP_ANTI);
2821 INSN_REF_COUNT (insn) = 1;
2824 if (SCHED_GROUP_P (insn))
2826 while (SCHED_GROUP_P (insn))
2828 insn = prev_nonnote_insn (insn);
2829 priority (insn);
2831 continue;
2833 #if 0
2834 if (i < 0)
2835 continue;
2836 if (INSN_PRIORITY (insn) < i)
2837 i = INSN_PRIORITY (insn);
2838 else if (INSN_PRIORITY (insn) > i)
2839 i = DONE_PRIORITY;
2840 #endif
2844 #if 0
2845 /* This short-cut doesn't work. It does not count call insns crossed by
2846 registers in reg_sometimes_live. It does not mark these registers as
2847 dead if they die in this block. It does not mark these registers live
2848 (or create new reg_sometimes_live entries if necessary) if they are born
2849 in this block.
2851 The easy solution is to just always schedule a block. These blocks tend
2852 to be very short, so this doesn't slow down this pass by much. */
2854 /* If existing order is good, don't bother to reorder. */
2855 if (i != DONE_PRIORITY)
2857 if (file)
2858 fprintf (file, ";; already scheduled\n");
2860 if (reload_completed == 0)
2862 for (i = 0; i < sometimes_max; i++)
2863 regs_sometimes_live[i].live_length += n_insns;
2865 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2867 free_pending_lists ();
2868 goto ret;
2870 #endif
2872 /* Scan all the insns to be scheduled, removing NOTE insns
2873 and register death notes.
2874 Line number NOTE insns end up in NOTE_LIST.
2875 Register death notes end up in DEAD_NOTES.
2877 Recreate the register life information for the end of this basic
2878 block. */
2880 if (reload_completed == 0)
2882 COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
2883 CLEAR_REG_SET (bb_dead_regs);
2885 if (b == 0)
2887 /* This is the first block in the function. There may be insns
2888 before head that we can't schedule. We still need to examine
2889 them though for accurate register lifetime analysis. */
2891 /* We don't want to remove any REG_DEAD notes as the code below
2892 does. */
2894 for (insn = BLOCK_HEAD (b); insn != head;
2895 insn = NEXT_INSN (insn))
2896 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2898 /* See if the register gets born here. */
2899 /* We must check for registers being born before we check for
2900 registers dying. It is possible for a register to be born
2901 and die in the same insn, e.g. reading from a volatile
2902 memory location into an otherwise unused register. Such
2903 a register must be marked as dead after this insn. */
2904 if (GET_CODE (PATTERN (insn)) == SET
2905 || GET_CODE (PATTERN (insn)) == CLOBBER)
2906 sched_note_set (PATTERN (insn), 0);
2907 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2909 int j;
2910 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2911 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2912 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2913 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2915 /* ??? This code is obsolete and should be deleted. It
2916 is harmless though, so we will leave it in for now. */
2917 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2918 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2919 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2922 /* Each call clobbers (makes live) all call-clobbered regs
2923 that are not global or fixed. Note that the function-value
2924 reg is a call_clobbered reg. */
2926 if (GET_CODE (insn) == CALL_INSN)
2928 int j;
2929 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2930 if (call_used_regs[j] && ! global_regs[j]
2931 && ! fixed_regs[j])
2933 SET_REGNO_REG_SET (bb_live_regs, j);
2934 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
2938 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2940 if ((REG_NOTE_KIND (link) == REG_DEAD
2941 || REG_NOTE_KIND (link) == REG_UNUSED)
2942 /* Verify that the REG_NOTE has a valid value. */
2943 && GET_CODE (XEXP (link, 0)) == REG)
2945 register int regno = REGNO (XEXP (link, 0));
2947 if (regno < FIRST_PSEUDO_REGISTER)
2949 int j = HARD_REGNO_NREGS (regno,
2950 GET_MODE (XEXP (link, 0)));
2951 while (--j >= 0)
2953 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
2954 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
2957 else
2959 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
2960 SET_REGNO_REG_SET (bb_dead_regs, regno);
2968 /* If debugging information is being produced, keep track of the line
2969 number notes for each insn. */
2970 if (write_symbols != NO_DEBUG)
2972 /* We must use the true line number for the first insn in the block
2973 that was computed and saved at the start of this pass. We can't
2974 use the current line number, because scheduling of the previous
2975 block may have changed the current line number. */
2976 rtx line = line_note_head[b];
2978 for (insn = BLOCK_HEAD (b);
2979 insn != next_tail;
2980 insn = NEXT_INSN (insn))
2981 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2982 line = insn;
2983 else
2984 LINE_NOTE (insn) = line;
2987 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2989 rtx prev, next, link;
2991 /* Farm out notes. This is needed to keep the debugger from
2992 getting completely deranged. */
2993 if (GET_CODE (insn) == NOTE)
2995 prev = insn;
2996 insn = unlink_notes (insn, next_tail);
2997 if (prev == tail)
2998 abort ();
2999 if (prev == head)
3000 abort ();
3001 if (insn == next_tail)
3002 abort ();
3005 if (reload_completed == 0
3006 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3008 /* See if the register gets born here. */
3009 /* We must check for registers being born before we check for
3010 registers dying. It is possible for a register to be born and
3011 die in the same insn, e.g. reading from a volatile memory
3012 location into an otherwise unused register. Such a register
3013 must be marked as dead after this insn. */
3014 if (GET_CODE (PATTERN (insn)) == SET
3015 || GET_CODE (PATTERN (insn)) == CLOBBER)
3016 sched_note_set (PATTERN (insn), 0);
3017 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3019 int j;
3020 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3021 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3022 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3023 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3025 /* ??? This code is obsolete and should be deleted. It
3026 is harmless though, so we will leave it in for now. */
3027 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3028 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3029 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3032 /* Each call clobbers (makes live) all call-clobbered regs that are
3033 not global or fixed. Note that the function-value reg is a
3034 call_clobbered reg. */
3036 if (GET_CODE (insn) == CALL_INSN)
3038 int j;
3039 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
3040 if (call_used_regs[j] && ! global_regs[j]
3041 && ! fixed_regs[j])
3043 SET_REGNO_REG_SET (bb_live_regs, j);
3044 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
3048 /* Need to know what registers this insn kills. */
3049 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3051 next = XEXP (link, 1);
3052 if ((REG_NOTE_KIND (link) == REG_DEAD
3053 || REG_NOTE_KIND (link) == REG_UNUSED)
3054 /* Verify that the REG_NOTE has a valid value. */
3055 && GET_CODE (XEXP (link, 0)) == REG)
3057 register int regno = REGNO (XEXP (link, 0));
3059 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3060 alone. */
3061 if (REG_NOTE_KIND (link) == REG_DEAD)
3063 if (prev)
3064 XEXP (prev, 1) = next;
3065 else
3066 REG_NOTES (insn) = next;
3067 XEXP (link, 1) = dead_notes;
3068 dead_notes = link;
3070 else
3071 prev = link;
3073 if (regno < FIRST_PSEUDO_REGISTER)
3075 int j = HARD_REGNO_NREGS (regno,
3076 GET_MODE (XEXP (link, 0)));
3077 while (--j >= 0)
3079 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
3080 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
3083 else
3085 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
3086 SET_REGNO_REG_SET (bb_dead_regs, regno);
3089 else
3090 prev = link;
3095 if (reload_completed == 0)
3097 /* Keep track of register lives. */
3098 old_live_regs = ALLOCA_REG_SET ();
3099 regs_sometimes_live
3100 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3101 sometimes_max = 0;
3103 /* Start with registers live at end. */
3104 COPY_REG_SET (old_live_regs, bb_live_regs);
3105 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
3107 sometimes_max
3108 = new_sometimes_live (regs_sometimes_live,
3109 j, sometimes_max);
3113 SCHED_SORT (ready, n_ready, 1);
3115 if (file)
3117 fprintf (file, ";; ready list initially:\n;; ");
3118 for (i = 0; i < n_ready; i++)
3119 fprintf (file, "%d ", INSN_UID (ready[i]));
3120 fprintf (file, "\n\n");
3122 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3123 if (INSN_PRIORITY (insn) > 0)
3124 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3125 INSN_UID (insn), INSN_PRIORITY (insn),
3126 INSN_REF_COUNT (insn));
3129 /* Now HEAD and TAIL are going to become disconnected
3130 entirely from the insn chain. */
3131 tail = 0;
3133 /* Q_SIZE will always be zero here. */
3134 q_ptr = 0; clock = 0;
3135 bzero ((char *) insn_queue, sizeof (insn_queue));
3137 /* Now, perform list scheduling. */
3139 /* Where we start inserting insns is after TAIL. */
3140 last = next_tail;
3142 new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
3143 ? NEED_HEAD : NEED_NOTHING);
3144 if (PREV_INSN (next_tail) == BLOCK_END (b))
3145 new_needs |= NEED_TAIL;
3147 new_ready = n_ready;
3148 while (sched_n_insns < n_insns)
3150 q_ptr = NEXT_Q (q_ptr); clock++;
3152 /* Add all pending insns that can be scheduled without stalls to the
3153 ready list. */
3154 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3156 if (file)
3157 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3158 INSN_UID (insn), INSN_UID (last), clock);
3159 ready[new_ready++] = insn;
3160 q_size -= 1;
3162 insn_queue[q_ptr] = 0;
3164 /* If there are no ready insns, stall until one is ready and add all
3165 of the pending insns at that point to the ready list. */
3166 if (new_ready == 0)
3168 register int stalls;
3170 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3171 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3173 for (; insn; insn = NEXT_INSN (insn))
3175 if (file)
3176 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3177 INSN_UID (insn), INSN_UID (last), stalls, clock);
3178 ready[new_ready++] = insn;
3179 q_size -= 1;
3181 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3182 break;
3185 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3188 /* There should be some instructions waiting to fire. */
3189 if (new_ready == 0)
3190 abort ();
3192 if (file)
3194 fprintf (file, ";; ready list at T-%d:", clock);
3195 for (i = 0; i < new_ready; i++)
3196 fprintf (file, " %d (%x)",
3197 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3200 /* Sort the ready list and choose the best insn to schedule. Select
3201 which insn should issue in this cycle and queue those that are
3202 blocked by function unit hazards.
3204 N_READY holds the number of items that were scheduled the last time,
3205 minus the one instruction scheduled on the last loop iteration; it
3206 is not modified for any other reason in this loop. */
3208 SCHED_SORT (ready, new_ready, n_ready);
3209 if (MAX_BLOCKAGE > 1)
3211 new_ready = schedule_select (ready, new_ready, clock, file);
3212 if (new_ready == 0)
3214 if (file)
3215 fprintf (file, "\n");
3216 /* We must set n_ready here, to ensure that sorting always
3217 occurs when we come back to the SCHED_SORT line above. */
3218 n_ready = 0;
3219 continue;
3222 n_ready = new_ready;
3223 last_scheduled_insn = insn = ready[0];
3225 /* The first insn scheduled becomes the new tail. */
3226 if (tail == 0)
3227 tail = insn;
3229 if (file)
3231 fprintf (file, ", now");
3232 for (i = 0; i < n_ready; i++)
3233 fprintf (file, " %d", INSN_UID (ready[i]));
3234 fprintf (file, "\n");
3237 if (DONE_PRIORITY_P (insn))
3238 abort ();
3240 if (reload_completed == 0)
3242 /* Process this insn, and each insn linked to this one which must
3243 be immediately output after this insn. */
3246 /* First we kill registers set by this insn, and then we
3247 make registers used by this insn live. This is the opposite
3248 order used above because we are traversing the instructions
3249 backwards. */
3251 /* Strictly speaking, we should scan REG_UNUSED notes and make
3252 every register mentioned there live, however, we will just
3253 kill them again immediately below, so there doesn't seem to
3254 be any reason why we bother to do this. */
3256 /* See if this is the last notice we must take of a register. */
3257 if (GET_CODE (PATTERN (insn)) == SET
3258 || GET_CODE (PATTERN (insn)) == CLOBBER)
3259 sched_note_set (PATTERN (insn), 1);
3260 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3262 int j;
3263 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3264 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3265 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3266 sched_note_set (XVECEXP (PATTERN (insn), 0, j), 1);
3269 /* This code keeps life analysis information up to date. */
3270 if (GET_CODE (insn) == CALL_INSN)
3272 register struct sometimes *p;
3274 /* A call kills all call used registers that are not
3275 global or fixed, except for those mentioned in the call
3276 pattern which will be made live again later. */
3277 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3278 if (call_used_regs[i] && ! global_regs[i]
3279 && ! fixed_regs[i])
3281 CLEAR_REGNO_REG_SET (bb_live_regs, i);
3282 SET_REGNO_REG_SET (bb_dead_regs, i);
3285 /* Regs live at the time of a call instruction must not
3286 go in a register clobbered by calls. Record this for
3287 all regs now live. Note that insns which are born or
3288 die in a call do not cross a call, so this must be done
3289 after the killings (above) and before the births
3290 (below). */
3291 p = regs_sometimes_live;
3292 for (i = 0; i < sometimes_max; i++, p++)
3293 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
3294 p->calls_crossed += 1;
3297 /* Make every register used live, and add REG_DEAD notes for
3298 registers which were not live before we started. */
3299 attach_deaths_insn (insn);
3301 /* Find registers now made live by that instruction. */
3302 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
3304 sometimes_max
3305 = new_sometimes_live (regs_sometimes_live,
3306 i, sometimes_max);
3308 IOR_REG_SET (old_live_regs, bb_live_regs);
3310 /* Count lengths of all regs we are worrying about now,
3311 and handle registers no longer live. */
3313 for (i = 0; i < sometimes_max; i++)
3315 register struct sometimes *p = &regs_sometimes_live[i];
3316 int regno = p->regno;
3318 p->live_length += 1;
3320 if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
3322 /* This is the end of one of this register's lifetime
3323 segments. Save the lifetime info collected so far,
3324 and clear its bit in the old_live_regs entry. */
3325 sched_reg_live_length[regno] += p->live_length;
3326 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3327 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
3329 /* Delete the reg_sometimes_live entry for this reg by
3330 copying the last entry over top of it. */
3331 *p = regs_sometimes_live[--sometimes_max];
3332 /* ...and decrement i so that this newly copied entry
3333 will be processed. */
3334 i--;
3338 link = insn;
3339 insn = PREV_INSN (insn);
3341 while (SCHED_GROUP_P (link));
3343 /* Set INSN back to the insn we are scheduling now. */
3344 insn = ready[0];
3347 /* Schedule INSN. Remove it from the ready list. */
3348 ready += 1;
3349 n_ready -= 1;
3351 sched_n_insns += 1;
3352 NEXT_INSN (insn) = last;
3353 PREV_INSN (last) = insn;
3355 /* Everything that precedes INSN now either becomes "ready", if
3356 it can execute immediately before INSN, or "pending", if
3357 there must be a delay. Give INSN high enough priority that
3358 at least one (maybe more) reg-killing insns can be launched
3359 ahead of all others. Mark INSN as scheduled by changing its
3360 priority to -1. */
3361 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3362 new_ready = schedule_insn (insn, ready, n_ready, clock);
3363 INSN_PRIORITY (insn) = DONE_PRIORITY;
3365 /* Schedule all prior insns that must not be moved. */
3366 if (SCHED_GROUP_P (insn))
3368 /* Disable these insns from being launched, in case one of the
3369 insns in the group has a dependency on an earlier one. */
3370 link = insn;
3371 while (SCHED_GROUP_P (link))
3373 /* Disable these insns from being launched by anybody. */
3374 link = PREV_INSN (link);
3375 INSN_REF_COUNT (link) = 0;
3378 /* Now handle each group insn like the main insn was handled
3379 above. */
3380 link = insn;
3381 while (SCHED_GROUP_P (link))
3383 link = PREV_INSN (link);
3385 sched_n_insns += 1;
3387 /* ??? Why don't we set LAUNCH_PRIORITY here? */
3388 new_ready = schedule_insn (link, ready, new_ready, clock);
3389 INSN_PRIORITY (link) = DONE_PRIORITY;
3393 /* Put back NOTE_INSN_SETJMP,
3394 NOTE_INSN_{LOOP,EHREGION}_{BEG,END} notes. */
3396 /* To prime the loop, we need to handle INSN and all the insns in the
3397 sched group. */
3398 last = NEXT_INSN (insn);
3401 insn = PREV_INSN (last);
3403 /* Maintain a valid chain so emit_note_before works.
3404 This is necessary because PREV_INSN (insn) isn't valid
3405 (if ! SCHED_GROUP_P) and if it points to an insn already
3406 scheduled, a circularity will result. */
3407 if (! SCHED_GROUP_P (insn))
3409 NEXT_INSN (prev_head) = insn;
3410 PREV_INSN (insn) = prev_head;
3413 last = reemit_notes (insn, insn);
3415 while (SCHED_GROUP_P (insn));
3417 if (q_size != 0)
3418 abort ();
3420 if (reload_completed == 0)
3421 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3423 /* HEAD is now the first insn in the chain of insns that
3424 have been scheduled by the loop above.
3425 TAIL is the last of those insns. */
3426 head = last;
3428 /* NOTE_LIST is the end of a chain of notes previously found
3429 among the insns. Insert them at the beginning of the insns. */
3430 if (note_list != 0)
3432 rtx note_head = note_list;
3433 while (PREV_INSN (note_head))
3434 note_head = PREV_INSN (note_head);
3436 PREV_INSN (head) = note_list;
3437 NEXT_INSN (note_list) = head;
3438 head = note_head;
3441 /* There should be no REG_DEAD notes leftover at the end.
3442 In practice, this can occur as the result of bugs in flow, combine.c,
3443 and/or sched.c. The values of the REG_DEAD notes remaining are
3444 meaningless, because dead_notes is just used as a free list. */
3445 #if 1
3446 if (dead_notes != 0)
3447 abort ();
3448 #endif
3450 if (new_needs & NEED_HEAD)
3451 BLOCK_HEAD (b) = head;
3452 PREV_INSN (head) = prev_head;
3453 NEXT_INSN (prev_head) = head;
3455 if (new_needs & NEED_TAIL)
3456 BLOCK_END (b) = tail;
3457 NEXT_INSN (tail) = next_tail;
3458 PREV_INSN (next_tail) = tail;
3460 /* Restore the line-number notes of each insn. */
3461 if (write_symbols != NO_DEBUG)
3463 rtx line, note, prev, new;
3464 int notes = 0;
3466 head = BLOCK_HEAD (b);
3467 next_tail = NEXT_INSN (BLOCK_END (b));
3469 /* Determine the current line-number. We want to know the current
3470 line number of the first insn of the block here, in case it is
3471 different from the true line number that was saved earlier. If
3472 different, then we need a line number note before the first insn
3473 of this block. If it happens to be the same, then we don't want to
3474 emit another line number note here. */
3475 for (line = head; line; line = PREV_INSN (line))
3476 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3477 break;
3479 /* Walk the insns keeping track of the current line-number and inserting
3480 the line-number notes as needed. */
3481 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3482 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3483 line = insn;
3484 /* This used to emit line number notes before every non-deleted note.
3485 However, this confuses a debugger, because line notes not separated
3486 by real instructions all end up at the same address. I can find no
3487 use for line number notes before other notes, so none are emitted. */
3488 else if (GET_CODE (insn) != NOTE
3489 && (note = LINE_NOTE (insn)) != 0
3490 && note != line
3491 && (line == 0
3492 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3493 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3495 line = note;
3496 prev = PREV_INSN (insn);
3497 if (LINE_NOTE (note))
3499 /* Re-use the original line-number note. */
3500 LINE_NOTE (note) = 0;
3501 PREV_INSN (note) = prev;
3502 NEXT_INSN (prev) = note;
3503 PREV_INSN (insn) = note;
3504 NEXT_INSN (note) = insn;
3506 else
3508 notes++;
3509 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3510 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3511 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
3514 if (file && notes)
3515 fprintf (file, ";; added %d line-number notes\n", notes);
3518 if (file)
3520 fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3521 clock, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
3524 /* Yow! We're done! */
3525 free_pending_lists ();
3527 ret:
3528 FREE_REG_SET (reg_pending_sets);
3529 FREE_REG_SET (old_live_regs);
3531 return;
3534 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3535 needed for the hard register mentioned in the note. This can happen
3536 if the reference to the hard register in the original insn was split into
3537 several smaller hard register references in the split insns. */
3539 static void
3540 split_hard_reg_notes (note, first, last)
3541 rtx note, first, last;
3543 rtx reg, temp, link;
3544 int n_regs, i, new_reg;
3545 rtx insn;
3547 /* Assume that this is a REG_DEAD note. */
3548 if (REG_NOTE_KIND (note) != REG_DEAD)
3549 abort ();
3551 reg = XEXP (note, 0);
3553 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3555 for (i = 0; i < n_regs; i++)
3557 new_reg = REGNO (reg) + i;
3559 /* Check for references to new_reg in the split insns. */
3560 for (insn = last; ; insn = PREV_INSN (insn))
3562 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3563 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3565 /* Create a new reg dead note here. */
3566 link = rtx_alloc (EXPR_LIST);
3567 PUT_REG_NOTE_KIND (link, REG_DEAD);
3568 XEXP (link, 0) = temp;
3569 XEXP (link, 1) = REG_NOTES (insn);
3570 REG_NOTES (insn) = link;
3572 /* If we killed multiple registers here, then add in the excess. */
3573 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3575 break;
3577 /* It isn't mentioned anywhere, so no new reg note is needed for
3578 this register. */
3579 if (insn == first)
3580 break;
3585 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3586 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3588 static void
3589 new_insn_dead_notes (pat, insn, last, orig_insn)
3590 rtx pat, insn, last, orig_insn;
3592 rtx dest, tem, set;
3594 /* PAT is either a CLOBBER or a SET here. */
3595 dest = XEXP (pat, 0);
3597 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3598 || GET_CODE (dest) == STRICT_LOW_PART
3599 || GET_CODE (dest) == SIGN_EXTRACT)
3600 dest = XEXP (dest, 0);
3602 if (GET_CODE (dest) == REG)
3604 /* If the original insn already used this register, we may not add new
3605 notes for it. One example for a split that needs this test is
3606 when a multi-word memory access with register-indirect addressing
3607 is split into multiple memory accesses with auto-increment and
3608 one adjusting add instruction for the address register. */
3609 if (reg_referenced_p (dest, PATTERN (orig_insn)))
3610 return;
3611 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3613 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3614 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3615 && (set = single_set (tem)))
3617 rtx tem_dest = SET_DEST (set);
3619 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3620 || GET_CODE (tem_dest) == SUBREG
3621 || GET_CODE (tem_dest) == STRICT_LOW_PART
3622 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3623 tem_dest = XEXP (tem_dest, 0);
3625 if (! rtx_equal_p (tem_dest, dest))
3627 /* Use the same scheme as combine.c: don't put both REG_DEAD
3628 and REG_UNUSED notes on the same insn. */
3629 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3630 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3632 rtx note = rtx_alloc (EXPR_LIST);
3633 PUT_REG_NOTE_KIND (note, REG_DEAD);
3634 XEXP (note, 0) = dest;
3635 XEXP (note, 1) = REG_NOTES (tem);
3636 REG_NOTES (tem) = note;
3638 /* The reg only dies in one insn, the last one that uses
3639 it. */
3640 break;
3642 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3643 /* We found an instruction that both uses the register,
3644 and sets it, so no new REG_NOTE is needed for this set. */
3645 break;
3648 /* If this is a set, it must die somewhere, unless it is the dest of
3649 the original insn, and hence is live after the original insn. Abort
3650 if it isn't supposed to be live after the original insn.
3652 If this is a clobber, then just add a REG_UNUSED note. */
3653 if (tem == insn)
3655 int live_after_orig_insn = 0;
3656 rtx pattern = PATTERN (orig_insn);
3657 int i;
3659 if (GET_CODE (pat) == CLOBBER)
3661 rtx note = rtx_alloc (EXPR_LIST);
3662 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3663 XEXP (note, 0) = dest;
3664 XEXP (note, 1) = REG_NOTES (insn);
3665 REG_NOTES (insn) = note;
3666 return;
3669 /* The original insn could have multiple sets, so search the
3670 insn for all sets. */
3671 if (GET_CODE (pattern) == SET)
3673 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3674 live_after_orig_insn = 1;
3676 else if (GET_CODE (pattern) == PARALLEL)
3678 for (i = 0; i < XVECLEN (pattern, 0); i++)
3679 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3680 && reg_overlap_mentioned_p (dest,
3681 SET_DEST (XVECEXP (pattern,
3682 0, i))))
3683 live_after_orig_insn = 1;
3686 if (! live_after_orig_insn)
3687 abort ();
3692 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3693 registers modified by X. INC is -1 if the containing insn is being deleted,
3694 and is 1 if the containing insn is a newly generated insn. */
3696 static void
3697 update_n_sets (x, inc)
3698 rtx x;
3699 int inc;
3701 rtx dest = SET_DEST (x);
3703 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3704 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3705 dest = SUBREG_REG (dest);
3707 if (GET_CODE (dest) == REG)
3709 int regno = REGNO (dest);
3711 if (regno < FIRST_PSEUDO_REGISTER)
3713 register int i;
3714 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3716 for (i = regno; i < endregno; i++)
3717 REG_N_SETS (i) += inc;
3719 else
3720 REG_N_SETS (regno) += inc;
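/* For instance, deleting (INC == -1) an insn whose SET_DEST is a
   DImode hard register pair decrements REG_N_SETS for both hard
   regnos it occupies, while a pseudo register is adjusted as a
   single entry.  */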
3724 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3725 the insns from FIRST to LAST inclusive that were created by splitting
3726 ORIG_INSN. NOTES are the original REG_NOTES. */
3728 void
3729 update_flow_info (notes, first, last, orig_insn)
3730 rtx notes;
3731 rtx first, last;
3732 rtx orig_insn;
3734 rtx insn, note;
3735 rtx next;
3736 rtx orig_dest, temp;
3737 rtx set;
3739 /* Get and save the destination set by the original insn. */
3741 orig_dest = single_set (orig_insn);
3742 if (orig_dest)
3743 orig_dest = SET_DEST (orig_dest);
3745 /* Move REG_NOTES from the original insn to where they now belong. */
3747 for (note = notes; note; note = next)
3749 next = XEXP (note, 1);
3750 switch (REG_NOTE_KIND (note))
3752 case REG_DEAD:
3753 case REG_UNUSED:
3754 /* Move these notes from the original insn to the last new insn that
3755 references the register. */
3757 for (insn = last; ; insn = PREV_INSN (insn))
3759 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3760 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3762 /* If this note refers to a multiple word hard register, it
3763 may have been split into several smaller hard register
3764 references, so handle it specially. */
3765 temp = XEXP (note, 0);
3766 if (REG_NOTE_KIND (note) == REG_DEAD
3767 && GET_CODE (temp) == REG
3768 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3769 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3770 split_hard_reg_notes (note, first, last);
3771 else
3773 XEXP (note, 1) = REG_NOTES (insn);
3774 REG_NOTES (insn) = note;
3777 /* Sometimes we need to convert REG_UNUSED notes to REG_DEAD
3778 notes. */
3779 /* ??? This won't handle multiple word registers correctly,
3780 but should be good enough for now. */
3781 if (REG_NOTE_KIND (note) == REG_UNUSED
3782 && GET_CODE (XEXP (note, 0)) != SCRATCH
3783 && ! dead_or_set_p (insn, XEXP (note, 0)))
3784 PUT_REG_NOTE_KIND (note, REG_DEAD);
3786 /* The reg only dies in one insn, the last one that uses
3787 it. */
3788 break;
3790 /* It must die somewhere; fail if we couldn't find where it died.
3792 If this is a REG_UNUSED note, then it must be a temporary
3793 register that was not needed by this instantiation of the
3794 pattern, so we can safely ignore it. */
3795 if (insn == first)
3797 if (REG_NOTE_KIND (note) != REG_UNUSED)
3798 abort ();
3800 break;
3803 break;
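/* An illustrative case for the REG_UNUSED conversion above: a pseudo that
   carried REG_UNUSED on the original insn may, in the split sequence, be
   mentioned last by an insn that only reads it; the register is then
   genuinely live between the new insns, so the note must become REG_DEAD
   on that last user.  */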
3805 case REG_WAS_0:
3806 /* If the insn that set the register to 0 was deleted, this
3807 note cannot be relied on any longer. The destination might
3808 even have been moved to memory.
3809 This was observed when compiling execute/920501-6.c for SH4 with
3810 -O2 -fomit-frame-pointer -finline-functions. */
3811 if (GET_CODE (XEXP (note, 0)) == NOTE
3812 || INSN_DELETED_P (XEXP (note, 0)))
3813 break;
3814 /* This note applies to the dest of the original insn. Find the
3815 first new insn that now has the same dest, and move the note
3816 there. */
3818 if (! orig_dest)
3819 abort ();
3821 for (insn = first; ; insn = NEXT_INSN (insn))
3823 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3824 && (temp = single_set (insn))
3825 && rtx_equal_p (SET_DEST (temp), orig_dest))
3827 XEXP (note, 1) = REG_NOTES (insn);
3828 REG_NOTES (insn) = note;
3829 /* The reg is only zero before one insn, the first
3830 one that sets it. */
3831 break;
3833 /* If this note refers to a multiple word hard
3834 register, it may have been split into several smaller
3835 hard register references. We could split the notes,
3836 but simply dropping them is good enough. */
3837 if (GET_CODE (orig_dest) == REG
3838 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3839 && HARD_REGNO_NREGS (REGNO (orig_dest),
3840 GET_MODE (orig_dest)) > 1)
3841 break;
3842 /* It must be set somewhere, fail if we couldn't find where it
3843 was set. */
3844 if (insn == last)
3845 abort ();
3847 break;
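/* For instance (hypothetical): if the original insn's dest held zero just
   before the original insn, that is still true just before the first new
   insn that sets the same dest, which is why the note belongs there and
   nowhere else.  */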
3849 case REG_EQUAL:
3850 case REG_EQUIV:
3851 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one set
3852 is meaningless; just drop it. Otherwise fall through to REG_NO_CONFLICT. */
3853 if (! orig_dest)
3854 break;
3856 case REG_NO_CONFLICT:
3857 /* These notes apply to the dest of the original insn. Find the last
3858 new insn that now has the same dest, and move the note there. */
3860 if (! orig_dest)
3861 abort ();
3863 for (insn = last; ; insn = PREV_INSN (insn))
3865 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3866 && (temp = single_set (insn))
3867 && rtx_equal_p (SET_DEST (temp), orig_dest))
3869 XEXP (note, 1) = REG_NOTES (insn);
3870 REG_NOTES (insn) = note;
3871 /* Only put this note on one of the new insns. */
3872 break;
3875 /* The original dest must still be set someplace. Abort if we
3876 couldn't find it. */
3877 if (insn == first)
3879 /* However, if this note refers to a multiple word hard
3880 register, it may have been split into several smaller
3881 hard register references. We could split the notes,
3882 but simply dropping them is good enough. */
3883 if (GET_CODE (orig_dest) == REG
3884 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3885 && HARD_REGNO_NREGS (REGNO (orig_dest),
3886 GET_MODE (orig_dest)) > 1)
3887 break;
3888 /* Likewise for multi-word memory references. */
3889 if (GET_CODE (orig_dest) == MEM
3890 && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
3891 break;
3892 abort ();
3895 break;
3897 case REG_LIBCALL:
3898 /* Move a REG_LIBCALL note to the first insn created, and update
3899 the corresponding REG_RETVAL note. */
3900 XEXP (note, 1) = REG_NOTES (first);
3901 REG_NOTES (first) = note;
3903 insn = XEXP (note, 0);
3904 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3905 if (note)
3906 XEXP (note, 0) = first;
3907 break;
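/* A sketch of the invariant preserved here: the REG_LIBCALL note on the
   first insn of a libcall sequence and the REG_RETVAL note on its last
   insn point at each other, so moving either note means redirecting the
   partner note's pointer as well.  */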
3909 case REG_EXEC_COUNT:
3910 /* Move a REG_EXEC_COUNT note to the first insn created. */
3911 XEXP (note, 1) = REG_NOTES (first);
3912 REG_NOTES (first) = note;
3913 break;
3915 case REG_RETVAL:
3916 /* Move a REG_RETVAL note to the last insn created, and update
3917 the corresponding REG_LIBCALL note. */
3918 XEXP (note, 1) = REG_NOTES (last);
3919 REG_NOTES (last) = note;
3921 insn = XEXP (note, 0);
3922 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3923 if (note)
3924 XEXP (note, 0) = last;
3925 break;
3927 case REG_NONNEG:
3928 case REG_BR_PROB:
3929 /* This should be moved to whichever instruction is a JUMP_INSN. */
3931 for (insn = last; ; insn = PREV_INSN (insn))
3933 if (GET_CODE (insn) == JUMP_INSN)
3935 XEXP (note, 1) = REG_NOTES (insn);
3936 REG_NOTES (insn) = note;
3937 /* Only put this note on one of the new insns. */
3938 break;
3940 /* Fail if we couldn't find a JUMP_INSN. */
3941 if (insn == first)
3942 abort ();
3944 break;
3946 case REG_INC:
3947 /* reload sometimes leaves obsolete REG_INC notes around. */
3948 if (reload_completed)
3949 break;
3950 /* This should be moved to whichever instruction now has the
3951 increment operation. */
3952 abort ();
3954 case REG_LABEL:
3955 /* Should be moved to the new insn(s) which use the label. */
3956 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3957 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3958 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3959 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL,
3960 XEXP (note, 0),
3961 REG_NOTES (insn));
3962 break;
3964 case REG_CC_SETTER:
3965 case REG_CC_USER:
3966 /* These two notes will never appear until after reorg, so we don't
3967 have to handle them here. */
3968 default:
3969 abort ();
3973 /* Each new insn created, except the last, has a new set. If the destination
3974 is a register, then this reg is now live across several insns, whereas
3975 previously the dest reg was born and died within the same insn. To
3976 reflect this, we now need a REG_DEAD note on the insn where this
3977 dest reg dies.
3979 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
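/* An invented example: if the split produced

       (set (reg 200) (high:SI (symbol_ref "x")))
       (set (reg 100) (lo_sum:SI (reg 200) (symbol_ref "x")))

   then reg 200 is born in the first insn and dies in the second, so the
   second insn needs a REG_DEAD note for reg 200.  */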
3981 for (insn = first; insn != last; insn = NEXT_INSN (insn))
3983 rtx pat;
3984 int i;
3986 pat = PATTERN (insn);
3987 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3988 new_insn_dead_notes (pat, insn, last, orig_insn);
3989 else if (GET_CODE (pat) == PARALLEL)
3991 for (i = 0; i < XVECLEN (pat, 0); i++)
3992 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3993 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3994 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3998 /* If any insn, except the last, uses the register set by the last insn,
3999 then we need a new REG_DEAD note on that insn. In this case, there
4000 would not have been a REG_DEAD note for this register in the original
4001 insn because it was used and set within one insn. */
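/* Invented RTL to illustrate: if

       (set (reg 100) (neg:SI (reg 100)))

   was split into

       (set (reg 200) (reg 100))
       (set (reg 100) (neg:SI (reg 200)))

   then the old value of reg 100 now dies in the first new insn, which must
   get a REG_DEAD note for reg 100.  */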
4003 set = single_set (last);
4004 if (set)
4006 rtx dest = SET_DEST (set);
4008 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4009 || GET_CODE (dest) == STRICT_LOW_PART
4010 || GET_CODE (dest) == SIGN_EXTRACT)
4011 dest = XEXP (dest, 0);
4013 if (GET_CODE (dest) == REG
4014 /* Global registers are always live, so the code below does not
4015 apply to them. */
4016 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
4017 || ! global_regs[REGNO (dest)]))
4019 rtx stop_insn = PREV_INSN (first);
4021 /* If the last insn uses the register that it is setting, then
4022 we don't want to put a REG_DEAD note there. Search backwards
4023 to find the first insn that sets but does not use DEST. */
4025 insn = last;
4026 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
4028 for (insn = PREV_INSN (insn); insn != first;
4029 insn = PREV_INSN (insn))
4031 if ((set = single_set (insn))
4032 && reg_mentioned_p (dest, SET_DEST (set))
4033 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4034 break;
4038 /* Now find the first insn that uses but does not set DEST. */
4040 for (insn = PREV_INSN (insn); insn != stop_insn;
4041 insn = PREV_INSN (insn))
4043 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4044 && reg_mentioned_p (dest, PATTERN (insn))
4045 && (set = single_set (insn)))
4047 rtx insn_dest = SET_DEST (set);
4049 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4050 || GET_CODE (insn_dest) == SUBREG
4051 || GET_CODE (insn_dest) == STRICT_LOW_PART
4052 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4053 insn_dest = XEXP (insn_dest, 0);
4055 if (insn_dest != dest)
4057 note = rtx_alloc (EXPR_LIST);
4058 PUT_REG_NOTE_KIND (note, REG_DEAD);
4059 XEXP (note, 0) = dest;
4060 XEXP (note, 1) = REG_NOTES (insn);
4061 REG_NOTES (insn) = note;
4062 /* The reg only dies in one insn, the last one
4063 that uses it. */
4064 break;
4071 /* If the original insn modified a multi-register target, and it was
4072 split such that the original dest is now set by two or more
4073 SUBREG sets, then the split insns no longer kill the
4074 destination of the original insn.
4076 In this case, if an instruction in the same basic block, before the
4077 split insns, uses the original dest, and that use is killed by the
4078 original insn, then we must remove the REG_DEAD note from that
4079 earlier insn, because it is now superfluous.
4081 This does not apply when a hard register gets split, because the code
4082 knows how to handle overlapping hard registers properly. */
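/* An invented example of the situation just described: if

       (set (reg:DI 100) ...)

   was split into

       (set (subreg:SI (reg:DI 100) 0) ...)
       (set (subreg:SI (reg:DI 100) 1) ...)

   then neither new insn kills reg 100 outright, so a REG_DEAD note for
   reg 100 on an earlier insn in the block would now be stale.  */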
4083 if (orig_dest && GET_CODE (orig_dest) == REG)
4085 int found_orig_dest = 0;
4086 int found_split_dest = 0;
4088 for (insn = first; ; insn = NEXT_INSN (insn))
4090 rtx pat = PATTERN (insn);
4091 int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
4092 set = pat;
4093 for (;;)
4095 if (GET_CODE (set) == SET)
4097 if (GET_CODE (SET_DEST (set)) == REG
4098 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4100 found_orig_dest = 1;
4101 break;
4103 else if (GET_CODE (SET_DEST (set)) == SUBREG
4104 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4106 found_split_dest = 1;
4107 break;
4110 if (--i < 0)
4111 break;
4112 set = XVECEXP (pat, 0, i);
4115 if (insn == last)
4116 break;
4119 if (found_split_dest)
4121 /* Search backwards from FIRST, looking for the first insn that uses
4122 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4123 If we find an insn, and it has a REG_DEAD note, then delete the
4124 note. */
4126 for (insn = first; insn; insn = PREV_INSN (insn))
4128 if (GET_CODE (insn) == CODE_LABEL
4129 || GET_CODE (insn) == JUMP_INSN)
4130 break;
4131 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4132 && reg_mentioned_p (orig_dest, insn))
4134 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4135 if (note)
4136 remove_note (insn, note);
4140 else if (! found_orig_dest)
4142 int i, regno;
4144 /* Should never reach here for a pseudo reg. */
4145 if (REGNO (orig_dest) >= FIRST_PSEUDO_REGISTER)
4146 abort ();
4148 /* This can happen for a hard register, if the splitter
4149 does not bother to emit instructions which would be no-ops.
4150 We try to verify that this is the case by checking whether
4151 the original instruction uses all of the registers that it
4152 set. That case is OK, because deleting a no-op cannot affect
4153 REG_DEAD notes on other insns. If this is not the case, then
4154 abort. */
4156 regno = REGNO (orig_dest);
4157 for (i = HARD_REGNO_NREGS (regno, GET_MODE (orig_dest)) - 1;
4158 i >= 0; i--)
4159 if (! refers_to_regno_p (regno + i, regno + i + 1, orig_insn,
4160 NULL_PTR))
4161 break;
4162 if (i >= 0)
4163 abort ();
4167 /* Update reg_n_sets. This is necessary to prevent local alloc from
4168 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
4169 a reg from set once to set multiple times. */
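/* The hazard being avoided, paraphrased: local alloc only promotes a
   REG_EQUAL note to REG_EQUIV for a register that is set once, so a split
   that turns a single set of a pseudo into several must show up in
   REG_N_SETS before local alloc runs.  */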
4172 rtx x = PATTERN (orig_insn);
4173 RTX_CODE code = GET_CODE (x);
4175 if (code == SET || code == CLOBBER)
4176 update_n_sets (x, -1);
4177 else if (code == PARALLEL)
4179 int i;
4180 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4182 code = GET_CODE (XVECEXP (x, 0, i));
4183 if (code == SET || code == CLOBBER)
4184 update_n_sets (XVECEXP (x, 0, i), -1);
4188 for (insn = first; ; insn = NEXT_INSN (insn))
4190 x = PATTERN (insn);
4191 code = GET_CODE (x);
4193 if (code == SET || code == CLOBBER)
4194 update_n_sets (x, 1);
4195 else if (code == PARALLEL)
4197 int i;
4198 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4200 code = GET_CODE (XVECEXP (x, 0, i));
4201 if (code == SET || code == CLOBBER)
4202 update_n_sets (XVECEXP (x, 0, i), 1);
4206 if (insn == last)
4207 break;
4212 /* The one entry point in this file. DUMP_FILE is the dump file for
4213 this pass. */
4215 void
4216 schedule_insns (dump_file)
4217 FILE *dump_file;
4219 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4220 int b;
4221 rtx insn;
4223 /* Taking care of this degenerate case makes the rest of
4224 this code simpler. */
4225 if (n_basic_blocks == 0)
4226 return;
4228 /* Create an insn here so that we can hang dependencies off of it later. */
4229 sched_before_next_call
4230 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
4231 NULL_RTX, 0, NULL_RTX, NULL_RTX);
4233 /* Initialize the unused_*_lists. We can't use the ones left over from
4234 the previous function, because gcc has freed that memory. We can use
4235 the ones left over from the first sched pass in the second pass,
4236 however, so only clear them on the first sched pass. The first pass is
4237 before reload if flag_schedule_insns is set, otherwise it is afterwards. */
4239 if (reload_completed == 0 || ! flag_schedule_insns)
4241 unused_insn_list = 0;
4242 unused_expr_list = 0;
4245 /* We create no insns here, only reorder them, so we
4246 remember how far we can cut back the stack on exit. */
4248 /* Allocate data for this pass. See comments, above,
4249 for what these vectors do.
4251 We use xmalloc instead of alloca, because max_uid can be very large
4252 when there is a lot of function inlining. If we used alloca, we could
4253 exceed stack limits on some hosts for some inputs. */
4254 insn_luid = (int *) xmalloc (max_uid * sizeof (int));
4255 insn_priority = (int *) xmalloc (max_uid * sizeof (int));
4256 insn_tick = (int *) xmalloc (max_uid * sizeof (int));
4257 insn_costs = (short *) xmalloc (max_uid * sizeof (short));
4258 insn_units = (short *) xmalloc (max_uid * sizeof (short));
4259 insn_blockage = (unsigned int *) xmalloc (max_uid * sizeof (unsigned int));
4260 insn_ref_count = (int *) xmalloc (max_uid * sizeof (int));
4262 if (reload_completed == 0)
4264 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4265 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4266 bb_dead_regs = ALLOCA_REG_SET ();
4267 bb_live_regs = ALLOCA_REG_SET ();
4268 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
4269 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
4271 else
4273 sched_reg_n_calls_crossed = 0;
4274 sched_reg_live_length = 0;
4275 bb_dead_regs = 0;
4276 bb_live_regs = 0;
4278 init_alias_analysis ();
4280 if (write_symbols != NO_DEBUG)
4282 rtx line;
4284 line_note = (rtx *) xmalloc (max_uid * sizeof (rtx));
4285 bzero ((char *) line_note, max_uid * sizeof (rtx));
4286 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4287 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
4289 /* Determine the line-number at the start of each basic block.
4290 This must be computed and saved now, because after a basic block's
4291 predecessor has been scheduled, it is impossible to accurately
4292 determine the correct line number for the first insn of the block. */
4294 for (b = 0; b < n_basic_blocks; b++)
4295 for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
4296 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4298 line_note_head[b] = line;
4299 break;
4303 bzero ((char *) insn_luid, max_uid * sizeof (int));
4304 bzero ((char *) insn_priority, max_uid * sizeof (int));
4305 bzero ((char *) insn_tick, max_uid * sizeof (int));
4306 bzero ((char *) insn_costs, max_uid * sizeof (short));
4307 bzero ((char *) insn_units, max_uid * sizeof (short));
4308 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
4309 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
4311 /* Schedule each basic block, block by block. */
4313 /* ??? Add a NOTE after the last insn of the last basic block. It is not
4314 known why this is done. */
4315 /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
4316 valid insn. */
4318 insn = BLOCK_END (n_basic_blocks-1);
4319 if (NEXT_INSN (insn) == 0
4320 || (GET_CODE (insn) != NOTE
4321 && GET_CODE (insn) != CODE_LABEL
4322 /* Don't emit a NOTE if it would end up between an unconditional
4323 jump and a BARRIER. */
4324 && ! (GET_CODE (insn) == JUMP_INSN
4325 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4326 emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks-1));
4328 for (b = 0; b < n_basic_blocks; b++)
4330 rtx insn, next;
4332 note_list = 0;
4334 split_block_insns (b, reload_completed == 0 || ! flag_schedule_insns);
4336 schedule_block (b, dump_file);
4338 #ifdef USE_C_ALLOCA
4339 alloca (0);
4340 #endif
4343 /* Reposition the prologue and epilogue notes in case we moved the
4344 prologue/epilogue insns. */
4345 if (reload_completed)
4346 reposition_prologue_and_epilogue_notes (get_insns ());
4348 if (write_symbols != NO_DEBUG)
4350 rtx line = 0;
4351 rtx insn = get_insns ();
4352 int active_insn = 0;
4353 int notes = 0;
4355 /* Walk the insns, deleting redundant line-number notes. Many of
4356 these were already present before scheduling; the remainder tend
4357 to occur at basic block boundaries. */
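/* A hypothetical instance: two notes for the same line of the same file
   with no active insn between them, as can happen once blocks have been
   reordered, collapse to a single note; a line note followed by no active
   insn at all is deleted outright.  */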
4358 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4359 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4361 /* If there are no active insns following, INSN is redundant. */
4362 if (active_insn == 0)
4364 notes++;
4365 NOTE_SOURCE_FILE (insn) = 0;
4366 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4368 /* If the line number is unchanged, LINE is redundant. */
4369 else if (line
4370 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4371 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4373 notes++;
4374 NOTE_SOURCE_FILE (line) = 0;
4375 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4376 line = insn;
4378 else
4379 line = insn;
4380 active_insn = 0;
4382 else if (! ((GET_CODE (insn) == NOTE
4383 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4384 || (GET_CODE (insn) == INSN
4385 && (GET_CODE (PATTERN (insn)) == USE
4386 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4387 active_insn++;
4389 if (dump_file && notes)
4390 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4393 if (reload_completed == 0)
4395 int regno;
4396 for (regno = 0; regno < max_regno; regno++)
4397 if (sched_reg_live_length[regno])
4399 if (dump_file)
4401 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
4402 fprintf (dump_file,
4403 ";; register %d life shortened from %d to %d\n",
4404 regno, REG_LIVE_LENGTH (regno),
4405 sched_reg_live_length[regno]);
4406 /* Negative values are special; don't overwrite the current
4407 reg_live_length value if it is negative. */
4408 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
4409 && REG_LIVE_LENGTH (regno) >= 0)
4410 fprintf (dump_file,
4411 ";; register %d life extended from %d to %d\n",
4412 regno, REG_LIVE_LENGTH (regno),
4413 sched_reg_live_length[regno]);
4415 if (! REG_N_CALLS_CROSSED (regno)
4416 && sched_reg_n_calls_crossed[regno])
4417 fprintf (dump_file,
4418 ";; register %d now crosses calls\n", regno);
4419 else if (REG_N_CALLS_CROSSED (regno)
4420 && ! sched_reg_n_calls_crossed[regno]
4421 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4422 fprintf (dump_file,
4423 ";; register %d no longer crosses calls\n", regno);
4426 /* Negative values are special; don't overwrite the current
4427 reg_live_length value if it is negative. */
4428 if (REG_LIVE_LENGTH (regno) >= 0)
4429 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
4431 /* We can't change the value of reg_n_calls_crossed to zero for
4432 pseudos which are live in more than one block.
4434 This is because combine might have made an optimization which
4435 invalidated basic_block_live_at_start and reg_n_calls_crossed,
4436 but it does not update them. If we update reg_n_calls_crossed
4437 here, the two variables are now inconsistent, and this might
4438 confuse the caller-save code into saving a register that doesn't
4439 need to be saved. This is only a problem when we zero calls
4440 crossed for a pseudo live in multiple basic blocks.
4442 Alternatively, we could try to correctly update basic block live
4443 at start here in sched, but that seems complicated. */
4444 if (sched_reg_n_calls_crossed[regno]
4445 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4446 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
4450 free (insn_luid);
4451 free (insn_priority);
4452 free (insn_tick);
4453 free (insn_costs);
4454 free (insn_units);
4455 free (insn_blockage);
4456 free (insn_ref_count);
4458 if (write_symbols != NO_DEBUG)
4459 free (line_note);
4461 if (reload_completed == 0)
4463 FREE_REG_SET (bb_dead_regs);
4464 FREE_REG_SET (bb_live_regs);
4468 #endif /* INSN_SCHEDULING */