/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GNU CC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  The cache for true dependencies is allocated
   first; whenever it is allocated, the other two caches are allocated
   along with it.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
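
/* For illustration: a dependence of insn I on insn E occupies a single
   bit, indexed by the LUIDs of the pair (I and E are illustrative
   variables, not part of this file), so the duplicate test in
   add_dependence below becomes a constant-time bit test

       TEST_BIT (true_dependency_cache[INSN_LUID (i)], INSN_LUID (e))

   instead of a walk over LOG_LINKS (i).  */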
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   would be to switch off the duplicate-insn check for forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

HAIFA_INLINE rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

HAIFA_INLINE int
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x;
     rtx list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
          && XEXP (list1, 0) == x)
        return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p;
  enum reg_note present_dep_type;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* Hah!  Wrong.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      rtx nnext;
      while ((nnext = next_nonnote_insn (next)) != NULL
             && SCHED_GROUP_P (nnext)
             && GET_CODE (nnext) != CODE_LABEL)
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        present_dep_type = 0;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else
                abort ();
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
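
/* An illustrative note on the ordering test above: a true dependence is
   REG_NOTE_KIND 0, and the code treats a numerically smaller dep_type
   as a more restrictive dependence.  So if a link already exists with,
   say, kind REG_DEP_OUTPUT and add_dependence is called again for the
   same pair with a true dependence (kind 0), the existing link is
   upgraded in place rather than duplicated.  */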
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          free_INSN_LIST_node (link);

          found = 1;
        }
      else
        prev = link;
    }

  if (!found)
    abort ();
  return;
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
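
/* For illustration: on a HAVE_cc0 target, a compare that sets cc0 and
   the conditional jump that uses it form such a group.  The jump gets
   SCHED_GROUP_P set (see the CC0 case of sched_analyze_2 below), so
   group_leader applied to the compare advances past the jump and
   returns the jump, the last insn of the group.  */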
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
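
/* As a concrete illustration, for a memory location A:

     (0) read dependence:    r1 = A;   r2 = A;
     (1) true dependence:    A  = r1;  r2 = A;
     (2) anti dependence:    r1 = A;   A  = r2;
     (3) output dependence:  A  = r1;  A  = r2;

   Kinds (1)-(3) constrain reordering in general; a read dependence is
   only added in special cases such as volatile memory references (see
   the use of read_dependence in the MEM case of sched_analyze_2).  */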
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (deps, insn, only_write)
     struct deps *deps;
     rtx insn;
     int only_write;
{
  rtx u;
  rtx link;

  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL
      && GET_MODE (dest) == BLKmode)
    {
      register int i;
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        sched_analyze_1 (deps, XVECEXP (dest, 0, i), insn);
      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              int r = regno + i;
              rtx u;

              for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

              for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

              /* Clobbers need not be ordered with respect to one
                 another, but sets must be ordered with respect to a
                 pending clobber.  */
              if (code == SET)
                {
                  free_INSN_LIST_list (&deps->reg_last_uses[r]);
                  for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
                  SET_REGNO_REG_SET (reg_pending_sets, r);
                }
              else
                SET_REGNO_REG_SET (reg_pending_clobbers, r);

              /* Function calls clobber all call_used regs.  */
              if (global_regs[r] || (code == SET && call_used_regs[r]))
                for (u = deps->last_function_call; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

          if (code == SET)
            {
              free_INSN_LIST_list (&deps->reg_last_uses[regno]);
              for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno);
            }
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */

          if (REG_N_CALLS_CROSSED (regno) == 0)
            for (u = deps->last_function_call; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (deps->pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (deps, insn, 0);
        }
      else
        {
          rtx u;
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
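
/* A worked example: for a store such as (set (mem (reg 65)) (reg 66))
   (register numbers hypothetical), and assuming the pending lists have
   not overflowed, sched_analyze_1 orders the insn after every pending
   read that may alias the destination (anti dependence), after every
   pending write that may alias it (output dependence), and after the
   last memory flush; it then records the insn on the pending-write
   lists and analyzes the address (reg 65) and, via sched_analyze_2 on
   SET_SRC, the source (reg 66) as reads.  */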
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
        rtx u;
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                int r = regno + i;
                deps->reg_last_uses[r]
                  = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);

                for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                /* ??? This should never happen.  */
                for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                if (call_used_regs[r] || global_regs[r])
                  /* Function calls clobber all call_used regs.  */
                  for (u = deps->last_function_call; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              }
          }
        else
          {
            deps->reg_last_uses[regno]
              = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);

            for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* ??? This should never happen.  */
            for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (deps->sched_before_next_call, insn,
                              REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;

        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), x))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 x, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, 1);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                free_INSN_LIST_list (&deps->reg_last_uses[i]);

                for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (deps, insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We can not just fall through here since then we
           would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
           which does not indicate a traditional asm, unlike its normal
           usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (deps, XEXP (link, 0), insn);
        else
          sched_analyze_2 (deps, XEXP (link, 0), insn);
      }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next, u, pending, pending_mem;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        {
          for (i = 0; i < maxreg; i++)
            {
              for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
      else
        {
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          EXECUTE_IF_SET_IN_REG_SET
            (&tmp, 0, i,
             {
               for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
               deps->reg_last_uses[i]
                 = alloc_INSN_LIST (insn, deps->reg_last_uses[i]);
             });

          CLEAR_REG_SET (&tmp);
        }
      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
        {
          add_dependence (insn, XEXP (pending, 0), 0);

          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      int schedule_barrier_found = 0;
      rtx link;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
            schedule_barrier_found = 1;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;

      /* Add dependencies if a scheduling barrier was found.  */
      if (schedule_barrier_found)
        {
          for (i = 0; i < max_reg; i++)
            {
              rtx u;
              for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              free_INSN_LIST_list (&deps->reg_last_uses[i]);

              for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);

              for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);
            }
          reg_pending_sets_all = 1;

          flush_pending_lists (deps, insn, 0);
        }
    }

  /* Accumulate clobbers until the next set so that it will be output dependent
     on all of them.  At the next set we can clear the clobber list, since
     subsequent sets will be output dependent on it.  */
  EXECUTE_IF_SET_IN_REG_SET
    (reg_pending_sets, 0, i,
     {
       free_INSN_LIST_list (&deps->reg_last_sets[i]);
       free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
       deps->reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
     });
  EXECUTE_IF_SET_IN_REG_SET
    (reg_pending_clobbers, 0, i,
     {
       deps->reg_last_clobbers[i]
         = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
     });
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);

  if (reg_pending_sets_all)
    {
      for (i = 0; i < maxreg; i++)
        {
          free_INSN_LIST_list (&deps->reg_last_sets[i]);
          free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
          deps->reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
        }

      reg_pending_sets_all = 0;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = 0;
        }
    }
}
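
/* For illustration: before reload, a call is often followed by a copy
   of the hard return register into a pseudo, e.g. (set (reg 65) (reg 0))
   on a hypothetical machine whose return value lands in hard reg 0.
   Because one side is a hard register, the code above glues the copy to
   the call with set_sched_group_p and CANT_MOVE, and the post-call
   group stays open; the first insn that is not such a simple
   hard-reg/pseudo move closes the group.  */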
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  register rtx insn;
  register rtx u;
  rtx loop_notes = 0;

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            deps->last_pending_memory_flush
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              int max_reg = max_reg_num ();
              for (i = 0; i < max_reg; i++)
                {
                  for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  free_INSN_LIST_list (&deps->reg_last_uses[i]);

                  for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);

                  for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);
                }
              reg_pending_sets_all = 1;

              /* Add a pair of REG_SAVE_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (0),
                                                  REG_NOTES (insn));
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (NOTE_INSN_SETJMP),
                                                  REG_NOTES (insn));
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                  {
                    for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    SET_REGNO_REG_SET (reg_pending_clobbers, i);
                  }
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (deps->sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */

          /* last_function_call is now a list of insns.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = 1;
        }

      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
        {
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
        }
      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
        }

      if (insn == tail)
        return;
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
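
/* In other words, each backward link is inverted: if LOG_LINKS (insn)
   contains X with, say, kind REG_DEP_ANTI, then after this pass
   INSN_DEPEND (X) contains a link to INSN with the same kind, and
   INSN_DEP_COUNT (insn) counts how many insns INSN depends on, so the
   scheduler can walk INSN_DEPEND and decrement the counts of dependent
   insns as each insn is issued.  */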
/* Initialize the variables in *DEPS for region data dependence
   analysis.  */

void
init_deps (deps)
     struct deps *deps;
{
  int maxreg = max_reg_num ();
  deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
  deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
  deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
                    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int max_reg = max_reg_num ();
  int i;

  /* Note this loop is executed max_reg * nr_regions times.  Its first
     implementation accounted for over 90% of the calls to
     free_INSN_LIST_list.  The list was empty for the vast majority of
     those calls.  On the PA, not calling free_INSN_LIST_list in those
     cases improves -O2 compile times by 3-5% on average.  */
  for (i = 0; i < max_reg; ++i)
    {
      if (deps->reg_last_clobbers[i])
        free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
      if (deps->reg_last_sets[i])
        free_INSN_LIST_list (&deps->reg_last_sets[i]);
      if (deps->reg_last_uses[i])
        free_INSN_LIST_list (&deps->reg_last_uses[i]);
    }
  free (deps->reg_last_clobbers);
  free (deps->reg_last_sets);
  free (deps->reg_last_uses);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
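
/* To make the threshold concrete: with the 5-insns-per-block estimate
   above, luid / n_basic_blocks > 100 * 5 means the caches are only
   allocated when basic blocks average more than 500 insns, e.g. a
   function with 10 basic blocks must contain more than 5000 insns
   before the O(luid^2) bitmap memory is considered worth it.  */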
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}
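
/* A minimal sketch (not compiled) of how a client such as the haifa
   scheduler might drive this module; the function name, HEAD, TAIL and
   LUID are hypothetical stand-ins, and the exact call order in the real
   scheduler may differ.  */
#if 0
static void
example_build_dependencies (head, tail, luid)
     rtx head, tail;            /* First and last insn of the region.  */
     int luid;                  /* Number of insns to be scheduled.  */
{
  struct deps deps;

  init_deps_global ();
  init_dependency_caches (luid);        /* Profitability-gated.  */

  init_deps (&deps);
  sched_analyze (&deps, head, tail);            /* Build LOG_LINKS.  */
  compute_forward_dependences (head, tail);     /* Build INSN_DEPEND.  */
  free_deps (&deps);

  free_dependency_caches ();
  finish_deps_global ();
}
#endif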