gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com).
8 This file is part of GNU CC.
10 GNU CC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 2, or (at your option) any
13 later version.
15 GNU CC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to the
22 Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "sched-int.h"
41 #include "params.h"
43 extern char *reg_known_equiv_p;
44 extern rtx *reg_known_value;
46 static regset_head reg_pending_sets_head;
47 static regset_head reg_pending_clobbers_head;
49 static regset reg_pending_sets;
50 static regset reg_pending_clobbers;
51 static int reg_pending_sets_all;
53 /* To speed up the test for duplicate dependency links we keep a
54 record of dependencies created by add_dependence when the average
55 number of instructions in a basic block is very large.
57 Studies have shown that there are typically around 5 instructions between
58 branches in typical C code, so we can assume that the average
59 basic block is approximately 5 instructions long; we choose 100X
60 the average size as a very large basic block.
62 Each insn has associated bitmaps for its dependencies. Each bitmap
63 has enough entries to represent a dependency on any other insn in
64 the insn chain. If the true-dependency cache is allocated, the
65 anti- and output-dependency caches are allocated as well. */
66 static sbitmap *true_dependency_cache;
67 static sbitmap *anti_dependency_cache;
68 static sbitmap *output_dependency_cache;
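/* A rough sketch of the arithmetic behind this heuristic (illustrative,
   based on the numbers above): with an assumed average of 5 insns per
   basic block, "100X the average size" means the caches are only worth
   enabling when a region averages more than 100 * 5 = 500 insns per
   block, which is the test used in init_dependency_caches below.  Each
   cache is then a vector of LUID sbitmaps of LUID bits each, so the
   three caches cost roughly 3 * luid * luid / 8 bytes.  */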
70 /* To speed up consistency checking of the forward insn dependencies
71 we build, we use the following cache. Another possible solution
72 would be to switch off the check for duplicate insns in forward
73 dependencies. */
74 #ifdef ENABLE_CHECKING
75 static sbitmap *forward_dependency_cache;
76 #endif
78 static int deps_may_trap_p PARAMS ((rtx));
79 static void remove_dependence PARAMS ((rtx, rtx));
80 static void set_sched_group_p PARAMS ((rtx));
82 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
83 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
84 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
85 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
86 static rtx group_leader PARAMS ((rtx));
88 static rtx get_condition PARAMS ((rtx));
89 static int conditions_mutex_p PARAMS ((rtx, rtx));
91 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
93 static int
94 deps_may_trap_p (mem)
95 rtx mem;
97 rtx addr = XEXP (mem, 0);
99 if (REG_P (addr)
100 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
101 && reg_known_value[REGNO (addr)])
102 addr = reg_known_value[REGNO (addr)];
103 return rtx_addr_can_trap_p (addr);
106 /* Return the INSN_LIST containing INSN in LIST, or NULL
107 if LIST does not contain INSN. */
109 HAIFA_INLINE rtx
110 find_insn_list (insn, list)
111 rtx insn;
112 rtx list;
114 while (list)
116 if (XEXP (list, 0) == insn)
117 return list;
118 list = XEXP (list, 1);
120 return 0;
123 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
124 otherwise. */
126 HAIFA_INLINE int
127 find_insn_mem_list (insn, x, list, list1)
128 rtx insn, x;
129 rtx list, list1;
131 while (list)
133 if (XEXP (list, 0) == insn
134 && XEXP (list1, 0) == x)
135 return 1;
136 list = XEXP (list, 1);
137 list1 = XEXP (list1, 1);
139 return 0;
142 /* Find the condition under which INSN is executed. */
144 static rtx
145 get_condition (insn)
146 rtx insn;
148 rtx pat = PATTERN (insn);
149 rtx cond;
151 if (pat == 0)
152 return 0;
153 if (GET_CODE (pat) == COND_EXEC)
154 return COND_EXEC_TEST (pat);
155 if (GET_CODE (insn) != JUMP_INSN)
156 return 0;
157 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
158 return 0;
159 if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
160 return 0;
161 pat = SET_SRC (pat);
162 cond = XEXP (pat, 0);
163 if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
164 && XEXP (pat, 2) == pc_rtx)
165 return cond;
166 else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
167 && XEXP (pat, 1) == pc_rtx)
168 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
169 XEXP (cond, 0), XEXP (cond, 1));
170 else
171 return 0;
174 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
176 static int
177 conditions_mutex_p (cond1, cond2)
178 rtx cond1, cond2;
180 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
181 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
182 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
183 && XEXP (cond1, 0) == XEXP (cond2, 0)
184 && XEXP (cond1, 1) == XEXP (cond2, 1))
185 return 1;
186 return 0;
189 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
190 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
191 of dependence that this link represents. */
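/* A minimal usage sketch (the insn names here are hypothetical): a caller
   that must keep LATER_INSN from being scheduled ahead of EARLIER_INSN
   writes

     add_dependence (later_insn, earlier_insn, REG_DEP_ANTI);

   A DEP_TYPE of 0 denotes a true data dependence; REG_DEP_ANTI and
   REG_DEP_OUTPUT mark anti and output dependencies, as seen throughout
   this file.  */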
193 void
194 add_dependence (insn, elem, dep_type)
195 rtx insn;
196 rtx elem;
197 enum reg_note dep_type;
199 rtx link, next;
200 int present_p;
201 enum reg_note present_dep_type;
202 rtx cond1, cond2;
204 /* Don't depend an insn on itself. */
205 if (insn == elem)
206 return;
208 /* We can get a dependency on deleted insns due to optimizations in
209 the register allocation and reloading or due to splitting. Any
210 such dependency is useless and can be ignored. */
211 if (GET_CODE (elem) == NOTE)
212 return;
214 /* flow.c doesn't handle conditional lifetimes entirely correctly;
215 calls mess up the conditional lifetimes. */
216 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
218 cond1 = get_condition (insn);
219 cond2 = get_condition (elem);
220 if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
221 return;
224 /* If elem is part of a sequence that must be scheduled together, then
225 make the dependence point to the last insn of the sequence.
226 When HAVE_cc0, it is possible for NOTEs to exist between users and
227 setters of the condition codes, so we must skip past notes here.
228 Otherwise, NOTEs are impossible here. */
229 next = next_nonnote_insn (elem);
230 if (next && SCHED_GROUP_P (next)
231 && GET_CODE (next) != CODE_LABEL)
233 /* Notes can in fact intervene here, so next_nonnote_insn is used
234 below to skip past them while walking the group. */
236 /* We must reject CODE_LABELs, so that we don't get confused by one
237 that has LABEL_PRESERVE_P set, which is represented by the same
238 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
239 SCHED_GROUP_P. */
241 rtx nnext;
242 while ((nnext = next_nonnote_insn (next)) != NULL
243 && SCHED_GROUP_P (nnext)
244 && GET_CODE (nnext) != CODE_LABEL)
245 next = nnext;
247 /* Again, don't depend an insn on itself. */
248 if (insn == next)
249 return;
251 /* Make the dependence to NEXT, the last insn of the group, instead
252 of the original ELEM. */
253 elem = next;
256 present_p = 1;
257 #ifdef INSN_SCHEDULING
258 /* ??? No good way to tell from here whether we're doing interblock
259 scheduling. Possibly add another callback. */
260 #if 0
261 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
262 No need for interblock dependences with calls, since
263 calls are not moved between blocks. Note: the edge where
264 elem is a CALL is still required. */
265 if (GET_CODE (insn) == CALL_INSN
266 && (INSN_BB (elem) != INSN_BB (insn)))
267 return;
268 #endif
270 /* If we already have a dependency for ELEM, then we do not need to
271 do anything. Avoiding the list walk below can cut compile times
272 dramatically for some code. */
273 if (true_dependency_cache != NULL)
275 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
276 abort ();
277 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
278 present_dep_type = 0;
279 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
280 INSN_LUID (elem)))
281 present_dep_type = REG_DEP_ANTI;
282 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
283 INSN_LUID (elem)))
284 present_dep_type = REG_DEP_OUTPUT;
285 else
286 present_p = 0;
287 if (present_p && (int) dep_type >= (int) present_dep_type)
288 return;
290 #endif
292 /* Check that we don't already have this dependence. */
293 if (present_p)
294 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
295 if (XEXP (link, 0) == elem)
297 #ifdef INSN_SCHEDULING
298 /* Clear the corresponding cache entry because the type of the link
299 may be changed. */
300 if (true_dependency_cache != NULL)
302 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
303 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
304 INSN_LUID (elem));
305 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
306 && output_dependency_cache)
307 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
308 INSN_LUID (elem));
309 else
310 abort ();
312 #endif
314 /* If this is a more restrictive type of dependence than the existing
315 one, then change the existing dependence to this type. */
316 if ((int) dep_type < (int) REG_NOTE_KIND (link))
317 PUT_REG_NOTE_KIND (link, dep_type);
319 #ifdef INSN_SCHEDULING
320 /* If we are adding a dependency to INSN's LOG_LINKs, then
321 note that in the bitmap caches of dependency information. */
322 if (true_dependency_cache != NULL)
324 if ((int)REG_NOTE_KIND (link) == 0)
325 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
326 INSN_LUID (elem));
327 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
328 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
329 INSN_LUID (elem));
330 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
331 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
332 INSN_LUID (elem));
334 #endif
335 return;
337 /* Might want to check one level of transitivity to save conses. */
339 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
340 LOG_LINKS (insn) = link;
342 /* Insn dependency, not data dependency. */
343 PUT_REG_NOTE_KIND (link, dep_type);
345 #ifdef INSN_SCHEDULING
346 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
347 in the bitmap caches of dependency information. */
348 if (true_dependency_cache != NULL)
350 if ((int)dep_type == 0)
351 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
352 else if (dep_type == REG_DEP_ANTI)
353 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
354 else if (dep_type == REG_DEP_OUTPUT)
355 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
357 #endif
360 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
361 of INSN. Abort if not found. */
363 static void
364 remove_dependence (insn, elem)
365 rtx insn;
366 rtx elem;
368 rtx prev, link, next;
369 int found = 0;
371 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
373 next = XEXP (link, 1);
374 if (XEXP (link, 0) == elem)
376 if (prev)
377 XEXP (prev, 1) = next;
378 else
379 LOG_LINKS (insn) = next;
381 #ifdef INSN_SCHEDULING
382 /* If we are removing a dependency from the LOG_LINKS list,
383 make sure to remove it from the cache too. */
384 if (true_dependency_cache != NULL)
386 if (REG_NOTE_KIND (link) == 0)
387 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
388 INSN_LUID (elem));
389 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
390 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
391 INSN_LUID (elem));
392 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
393 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
394 INSN_LUID (elem));
396 #endif
398 free_INSN_LIST_node (link);
400 found = 1;
402 else
403 prev = link;
406 if (!found)
407 abort ();
408 return;
411 /* Return the insn which represents a SCHED_GROUP, i.e. the
412 last insn in the group. */
414 static rtx
415 group_leader (insn)
416 rtx insn;
418 rtx prev;
420 do
421 {
422 prev = insn;
423 insn = next_nonnote_insn (insn);
424 }
425 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
427 return prev;
430 /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
431 goes along with that. */
433 static void
434 set_sched_group_p (insn)
435 rtx insn;
437 rtx link, prev;
439 SCHED_GROUP_P (insn) = 1;
441 /* There may be a note before this insn now, but all notes will
442 be removed before we actually try to schedule the insns, so
443 it won't cause a problem later. We must avoid it here though. */
444 prev = prev_nonnote_insn (insn);
446 /* Make a copy of all dependencies on the immediately previous insn,
447 and add to this insn. This is so that all the dependencies will
448 apply to the group. Remove an explicit dependence on this insn
449 as SCHED_GROUP_P now represents it. */
451 if (find_insn_list (prev, LOG_LINKS (insn)))
452 remove_dependence (insn, prev);
454 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
455 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
458 /* Process an insn's memory dependencies. There are four kinds of
459 dependencies:
461 (0) read dependence: read follows read
462 (1) true dependence: read follows write
463 (2) anti dependence: write follows read
464 (3) output dependence: write follows write
466 We are careful to build only dependencies which actually exist, and
467 use transitivity to avoid building too many links. */
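/* A small illustration of the four kinds, using a made-up straight-line
   sequence on a single location A:

     r1 = MEM[A];     read
     MEM[A] = r2;     anti dependence on the read above
     MEM[A] = r3;     output dependence on the previous write
     r4 = MEM[A];     true dependence on the previous write

   The read-after-read pair carries only a read dependence, which is
   significant mainly for volatile references.  */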
469 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
470 The MEM is a memory reference contained within INSN, which we are saving
471 so that we can do memory aliasing on it. */
473 void
474 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
475 struct deps *deps;
476 rtx *insn_list, *mem_list, insn, mem;
478 register rtx link;
480 link = alloc_INSN_LIST (insn, *insn_list);
481 *insn_list = link;
483 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
484 *mem_list = link;
486 deps->pending_lists_length++;
489 /* Make a dependency between every memory reference on the pending lists
490 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
491 the read list. */
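/* Note that once the lists have been flushed, later memory references in
   the block need only depend on INSN itself: the pending lists are empty
   and last_pending_memory_flush is reset to contain just INSN.  */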
493 static void
494 flush_pending_lists (deps, insn, only_write)
495 struct deps *deps;
496 rtx insn;
497 int only_write;
499 rtx u;
500 rtx link;
502 while (deps->pending_read_insns && ! only_write)
504 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
505 REG_DEP_ANTI);
507 link = deps->pending_read_insns;
508 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
509 free_INSN_LIST_node (link);
511 link = deps->pending_read_mems;
512 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
513 free_EXPR_LIST_node (link);
515 while (deps->pending_write_insns)
517 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
518 REG_DEP_ANTI);
520 link = deps->pending_write_insns;
521 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
522 free_INSN_LIST_node (link);
524 link = deps->pending_write_mems;
525 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
526 free_EXPR_LIST_node (link);
528 deps->pending_lists_length = 0;
530 /* last_pending_memory_flush is now a list of insns. */
531 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
532 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
534 free_INSN_LIST_list (&deps->last_pending_memory_flush);
535 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
536 deps->pending_flush_length = 1;
539 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
540 rtx, X, creating all dependencies generated by the write to the
541 destination of X, and reads of everything mentioned. */
543 static void
544 sched_analyze_1 (deps, x, insn)
545 struct deps *deps;
546 rtx x;
547 rtx insn;
549 register int regno;
550 register rtx dest = XEXP (x, 0);
551 enum rtx_code code = GET_CODE (x);
553 if (dest == 0)
554 return;
556 if (GET_CODE (dest) == PARALLEL)
558 register int i;
560 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
561 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
562 sched_analyze_1 (deps,
563 gen_rtx_CLOBBER (VOIDmode,
564 XEXP (XVECEXP (dest, 0, i), 0)),
565 insn);
567 if (GET_CODE (x) == SET)
568 sched_analyze_2 (deps, SET_SRC (x), insn);
569 return;
572 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
573 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
575 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
577 /* The second and third arguments are values read by this insn. */
578 sched_analyze_2 (deps, XEXP (dest, 1), insn);
579 sched_analyze_2 (deps, XEXP (dest, 2), insn);
581 dest = XEXP (dest, 0);
584 if (GET_CODE (dest) == REG)
586 register int i;
588 regno = REGNO (dest);
590 /* A hard reg in a wide mode may really be multiple registers.
591 If so, mark all of them just like the first. */
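/* For example (hypothetical numbers), a DImode value living in hard
   register 4 on a 32-bit target also occupies register 5, so
   HARD_REGNO_NREGS returns 2 and the loop below records dependencies
   for both registers.  */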
592 if (regno < FIRST_PSEUDO_REGISTER)
594 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
595 while (--i >= 0)
597 int r = regno + i;
598 rtx u;
600 for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
601 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
603 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
604 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
606 /* Clobbers need not be ordered with respect to one
607 another, but sets must be ordered with respect to a
608 pending clobber. */
609 if (code == SET)
611 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
612 free_INSN_LIST_list (&deps->reg_last[r].uses);
613 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
614 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
615 SET_REGNO_REG_SET (reg_pending_sets, r);
617 else
618 SET_REGNO_REG_SET (reg_pending_clobbers, r);
620 /* Function calls clobber all call_used regs. */
621 if (global_regs[r] || (code == SET && call_used_regs[r]))
622 for (u = deps->last_function_call; u; u = XEXP (u, 1))
623 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
626 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
627 it does not reload. Ignore these as they have served their
628 purpose already. */
629 else if (regno >= deps->max_reg)
631 if (GET_CODE (PATTERN (insn)) != USE
632 && GET_CODE (PATTERN (insn)) != CLOBBER)
633 abort ();
635 else
637 rtx u;
639 for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
640 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
642 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
643 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
645 if (code == SET)
647 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
648 free_INSN_LIST_list (&deps->reg_last[regno].uses);
649 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
650 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
651 SET_REGNO_REG_SET (reg_pending_sets, regno);
653 else
654 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
656 /* Pseudos that are REG_EQUIV to something may be replaced
657 by that during reloading. We need only add dependencies for
658 the address in the REG_EQUIV note. */
659 if (!reload_completed
660 && reg_known_equiv_p[regno]
661 && GET_CODE (reg_known_value[regno]) == MEM)
662 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
664 /* Don't let it cross a call after scheduling if it doesn't
665 already cross one. */
667 if (REG_N_CALLS_CROSSED (regno) == 0)
668 for (u = deps->last_function_call; u; u = XEXP (u, 1))
669 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
672 else if (GET_CODE (dest) == MEM)
674 /* Writing memory. */
676 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
678 /* Flush all pending reads and writes to prevent the pending lists
679 from getting any larger. Insn scheduling runs too slowly when
680 these lists get long. When compiling GCC with itself,
681 this flush occurs 8 times for sparc, and 10 times for m88k using
682 the default value of 32. */
683 flush_pending_lists (deps, insn, 0);
685 else
687 rtx u;
688 rtx pending, pending_mem;
690 pending = deps->pending_read_insns;
691 pending_mem = deps->pending_read_mems;
692 while (pending)
694 if (anti_dependence (XEXP (pending_mem, 0), dest))
695 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
697 pending = XEXP (pending, 1);
698 pending_mem = XEXP (pending_mem, 1);
701 pending = deps->pending_write_insns;
702 pending_mem = deps->pending_write_mems;
703 while (pending)
705 if (output_dependence (XEXP (pending_mem, 0), dest))
706 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
708 pending = XEXP (pending, 1);
709 pending_mem = XEXP (pending_mem, 1);
712 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
713 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
715 add_insn_mem_dependence (deps, &deps->pending_write_insns,
716 &deps->pending_write_mems, insn, dest);
718 sched_analyze_2 (deps, XEXP (dest, 0), insn);
721 /* Analyze reads. */
722 if (GET_CODE (x) == SET)
723 sched_analyze_2 (deps, SET_SRC (x), insn);
726 /* Analyze the uses of memory and registers in rtx X in INSN. */
728 static void
729 sched_analyze_2 (deps, x, insn)
730 struct deps *deps;
731 rtx x;
732 rtx insn;
734 register int i;
735 register int j;
736 register enum rtx_code code;
737 register const char *fmt;
739 if (x == 0)
740 return;
742 code = GET_CODE (x);
744 switch (code)
746 case CONST_INT:
747 case CONST_DOUBLE:
748 case SYMBOL_REF:
749 case CONST:
750 case LABEL_REF:
751 /* Ignore constants. Note that we must handle CONST_DOUBLE here
752 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
753 this does not mean that this insn is using cc0. */
754 return;
756 #ifdef HAVE_cc0
757 case CC0:
758 /* User of CC0 depends on immediately preceding insn. */
759 set_sched_group_p (insn);
760 return;
761 #endif
763 case REG:
765 rtx u;
766 int regno = REGNO (x);
767 if (regno < FIRST_PSEUDO_REGISTER)
769 int i;
771 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
772 while (--i >= 0)
774 int r = regno + i;
775 deps->reg_last[r].uses
776 = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
777 SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
779 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
780 add_dependence (insn, XEXP (u, 0), 0);
782 /* ??? This should never happen. */
783 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
784 add_dependence (insn, XEXP (u, 0), 0);
786 if (call_used_regs[r] || global_regs[r])
787 /* Function calls clobber all call_used regs. */
788 for (u = deps->last_function_call; u; u = XEXP (u, 1))
789 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
792 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
793 it does not reload. Ignore these as they have served their
794 purpose already. */
795 else if (regno >= deps->max_reg)
797 if (GET_CODE (PATTERN (insn)) != USE
798 && GET_CODE (PATTERN (insn)) != CLOBBER)
799 abort ();
801 else
803 deps->reg_last[regno].uses
804 = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
805 SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
807 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
808 add_dependence (insn, XEXP (u, 0), 0);
810 /* ??? This should never happen. */
811 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
812 add_dependence (insn, XEXP (u, 0), 0);
814 /* Pseudos that are REG_EQUIV to something may be replaced
815 by that during reloading. We need only add dependencies for
816 the address in the REG_EQUIV note. */
817 if (!reload_completed
818 && reg_known_equiv_p[regno]
819 && GET_CODE (reg_known_value[regno]) == MEM)
820 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
822 /* If the register does not already cross any calls, then add this
823 insn to the sched_before_next_call list so that it will still
824 not cross calls after scheduling. */
825 if (REG_N_CALLS_CROSSED (regno) == 0)
826 add_dependence (deps->sched_before_next_call, insn,
827 REG_DEP_ANTI);
829 return;
832 case MEM:
834 /* Reading memory. */
835 rtx u;
836 rtx pending, pending_mem;
838 pending = deps->pending_read_insns;
839 pending_mem = deps->pending_read_mems;
840 while (pending)
842 if (read_dependence (XEXP (pending_mem, 0), x))
843 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
845 pending = XEXP (pending, 1);
846 pending_mem = XEXP (pending_mem, 1);
849 pending = deps->pending_write_insns;
850 pending_mem = deps->pending_write_mems;
851 while (pending)
853 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
854 x, rtx_varies_p))
855 add_dependence (insn, XEXP (pending, 0), 0);
857 pending = XEXP (pending, 1);
858 pending_mem = XEXP (pending_mem, 1);
861 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
862 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
863 || deps_may_trap_p (x))
864 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
866 /* Always add these dependencies to pending_reads, since
867 this insn may be followed by a write. */
868 add_insn_mem_dependence (deps, &deps->pending_read_insns,
869 &deps->pending_read_mems, insn, x);
871 /* Take advantage of tail recursion here. */
872 sched_analyze_2 (deps, XEXP (x, 0), insn);
873 return;
876 /* Force pending stores to memory in case a trap handler needs them. */
877 case TRAP_IF:
878 flush_pending_lists (deps, insn, 1);
879 break;
881 case ASM_OPERANDS:
882 case ASM_INPUT:
883 case UNSPEC_VOLATILE:
885 rtx u;
887 /* Traditional and volatile asm instructions must be considered to use
888 and clobber all hard registers, all pseudo-registers and all of
889 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
891 Consider for instance a volatile asm that changes the fpu rounding
892 mode. An insn should not be moved across this even if it only uses
893 pseudo-regs because it might give an incorrectly rounded result. */
894 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
896 for (i = 0; i < deps->max_reg; i++)
898 struct deps_reg *reg_last = &deps->reg_last[i];
900 for (u = reg_last->uses; u; u = XEXP (u, 1))
901 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
902 for (u = reg_last->sets; u; u = XEXP (u, 1))
903 add_dependence (insn, XEXP (u, 0), 0);
904 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
905 add_dependence (insn, XEXP (u, 0), 0);
907 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
908 free_INSN_LIST_list (&reg_last->uses);
910 reg_pending_sets_all = 1;
912 flush_pending_lists (deps, insn, 0);
915 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
916 We cannot just fall through here since then we would be confused
917 by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
918 traditional asms as they do in their normal usage. */
920 if (code == ASM_OPERANDS)
922 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
923 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
924 return;
926 break;
929 case PRE_DEC:
930 case POST_DEC:
931 case PRE_INC:
932 case POST_INC:
933 /* These both read and modify the result. We must handle them as writes
934 to get proper dependencies for following instructions. We must handle
935 them as reads to get proper dependencies from this to previous
936 instructions. Thus we need to pass them to both sched_analyze_1
937 and sched_analyze_2. We must call sched_analyze_2 first in order
938 to get the proper antecedent for the read. */
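/* For instance, a push such as (set (mem (pre_dec (reg sp))) (reg 1))
   reaches this point with X = (pre_dec (reg sp)); the stack pointer is
   then seen both as a use (sched_analyze_2 below) and as a write
   (sched_analyze_1 treats the PRE_DEC like a clobber of the register).
   The example is purely illustrative.  */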
939 sched_analyze_2 (deps, XEXP (x, 0), insn);
940 sched_analyze_1 (deps, x, insn);
941 return;
943 case POST_MODIFY:
944 case PRE_MODIFY:
945 /* op0 = op0 + op1 */
946 sched_analyze_2 (deps, XEXP (x, 0), insn);
947 sched_analyze_2 (deps, XEXP (x, 1), insn);
948 sched_analyze_1 (deps, x, insn);
949 return;
951 default:
952 break;
955 /* Other cases: walk the insn. */
956 fmt = GET_RTX_FORMAT (code);
957 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
959 if (fmt[i] == 'e')
960 sched_analyze_2 (deps, XEXP (x, i), insn);
961 else if (fmt[i] == 'E')
962 for (j = 0; j < XVECLEN (x, i); j++)
963 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
967 /* Analyze an INSN with pattern X to find all dependencies. */
969 static void
970 sched_analyze_insn (deps, x, insn, loop_notes)
971 struct deps *deps;
972 rtx x, insn;
973 rtx loop_notes;
975 register RTX_CODE code = GET_CODE (x);
976 int schedule_barrier_found = 0;
977 rtx link;
978 int i;
980 if (code == COND_EXEC)
982 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
984 /* ??? Should be recording conditions so we reduce the number of
985 false dependencies. */
986 x = COND_EXEC_CODE (x);
987 code = GET_CODE (x);
989 if (code == SET || code == CLOBBER)
990 sched_analyze_1 (deps, x, insn);
991 else if (code == PARALLEL)
993 register int i;
994 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
996 rtx sub = XVECEXP (x, 0, i);
997 code = GET_CODE (sub);
999 if (code == COND_EXEC)
1001 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1002 sub = COND_EXEC_CODE (sub);
1003 code = GET_CODE (sub);
1005 if (code == SET || code == CLOBBER)
1006 sched_analyze_1 (deps, sub, insn);
1007 else
1008 sched_analyze_2 (deps, sub, insn);
1011 else
1012 sched_analyze_2 (deps, x, insn);
1014 /* Mark registers clobbered or used by the called function. */
1015 if (GET_CODE (insn) == CALL_INSN)
1016 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1018 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1019 sched_analyze_1 (deps, XEXP (link, 0), insn);
1020 else
1021 sched_analyze_2 (deps, XEXP (link, 0), insn);
1024 if (GET_CODE (insn) == JUMP_INSN)
1026 rtx next;
1027 next = next_nonnote_insn (insn);
1028 if (next && GET_CODE (next) == BARRIER)
1029 schedule_barrier_found = 1;
1030 else
1032 rtx pending, pending_mem, u;
1033 regset_head tmp;
1034 INIT_REG_SET (&tmp);
1036 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1037 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
1039 struct deps_reg *reg_last = &deps->reg_last[i];
1040 for (u = reg_last->sets; u; u = XEXP (u, 1))
1041 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1042 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1043 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1046 CLEAR_REG_SET (&tmp);
1048 /* All memory writes and volatile reads must happen before the
1049 jump. Non-volatile reads must happen before the jump iff
1050 the result is needed by the register use mask computed above. */
1052 pending = deps->pending_write_insns;
1053 pending_mem = deps->pending_write_mems;
1054 while (pending)
1056 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1057 pending = XEXP (pending, 1);
1058 pending_mem = XEXP (pending_mem, 1);
1061 pending = deps->pending_read_insns;
1062 pending_mem = deps->pending_read_mems;
1063 while (pending)
1065 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1066 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1067 pending = XEXP (pending, 1);
1068 pending_mem = XEXP (pending_mem, 1);
1071 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1072 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1076 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1077 block, then we must be sure that no instructions are scheduled across it.
1078 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1079 become incorrect. */
1080 if (loop_notes)
1082 rtx link;
1084 /* Update loop_notes with any notes from this insn. Also determine
1085 if any of the notes on the list correspond to instruction scheduling
1086 barriers (loop, eh & setjmp notes, but not range notes). */
1087 link = loop_notes;
1088 while (XEXP (link, 1))
1090 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1091 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1092 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1093 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
1094 || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
1095 schedule_barrier_found = 1;
1097 link = XEXP (link, 1);
1099 XEXP (link, 1) = REG_NOTES (insn);
1100 REG_NOTES (insn) = loop_notes;
1103 /* If this instruction can throw an exception, then moving it changes
1104 where block boundaries fall. This is mighty confusing elsewhere.
1105 Therefore, prevent such an instruction from being moved. */
1106 if (flag_non_call_exceptions && can_throw_internal (insn))
1107 schedule_barrier_found = 1;
1109 /* Add dependencies if a scheduling barrier was found. */
1110 if (schedule_barrier_found)
1112 rtx u;
1114 for (i = 0; i < deps->max_reg; i++)
1116 struct deps_reg *reg_last = &deps->reg_last[i];
1118 for (u = reg_last->uses; u; u = XEXP (u, 1))
1119 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1120 for (u = reg_last->sets; u; u = XEXP (u, 1))
1121 add_dependence (insn, XEXP (u, 0), 0);
1122 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1123 add_dependence (insn, XEXP (u, 0), 0);
1125 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1126 free_INSN_LIST_list (&reg_last->uses);
1128 flush_pending_lists (deps, insn, 0);
1130 reg_pending_sets_all = 1;
1133 /* Accumulate clobbers until the next set so that it will be output
1134 dependent on all of them. At the next set we can clear the clobber
1135 list, since subsequent sets will be output dependent on it. */
1136 if (reg_pending_sets_all)
1138 reg_pending_sets_all = 0;
1139 for (i = 0; i < deps->max_reg; i++)
1141 struct deps_reg *reg_last = &deps->reg_last[i];
1142 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1144 free_INSN_LIST_list (&reg_last->sets);
1145 free_INSN_LIST_list (&reg_last->clobbers);
1147 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1148 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1151 else
1153 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1155 struct deps_reg *reg_last = &deps->reg_last[i];
1156 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1158 free_INSN_LIST_list (&reg_last->sets);
1159 free_INSN_LIST_list (&reg_last->clobbers);
1161 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1162 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1164 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1166 struct deps_reg *reg_last = &deps->reg_last[i];
1167 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1168 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1171 CLEAR_REG_SET (reg_pending_sets);
1172 CLEAR_REG_SET (reg_pending_clobbers);
1174 /* If a post-call group is still open, see if it should remain so.
1175 This insn must be a simple move of a hard reg to a pseudo or
1176 vice-versa.
1178 We must avoid moving these insns for correctness on
1179 SMALL_REGISTER_CLASS machines, and for special registers like
1180 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1181 hard regs for all targets. */
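/* For example (register numbers are illustrative), a copy such as
   (set (reg:SI 100) (reg:SI 0)) appearing right after a CALL_INSN moves
   the hard return register into a pseudo; the code below marks it with
   set_sched_group_p and CANT_MOVE so it stays glued to the call.  */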
1183 if (deps->in_post_call_group_p)
1185 rtx tmp, set = single_set (insn);
1186 int src_regno, dest_regno;
1188 if (set == NULL)
1189 goto end_call_group;
1191 tmp = SET_DEST (set);
1192 if (GET_CODE (tmp) == SUBREG)
1193 tmp = SUBREG_REG (tmp);
1194 if (GET_CODE (tmp) == REG)
1195 dest_regno = REGNO (tmp);
1196 else
1197 goto end_call_group;
1199 tmp = SET_SRC (set);
1200 if (GET_CODE (tmp) == SUBREG)
1201 tmp = SUBREG_REG (tmp);
1202 if (GET_CODE (tmp) == REG)
1203 src_regno = REGNO (tmp);
1204 else
1205 goto end_call_group;
1207 if (src_regno < FIRST_PSEUDO_REGISTER
1208 || dest_regno < FIRST_PSEUDO_REGISTER)
1210 set_sched_group_p (insn);
1211 CANT_MOVE (insn) = 1;
1213 else
1215 end_call_group:
1216 deps->in_post_call_group_p = 0;
1221 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1222 for every dependency. */
1224 void
1225 sched_analyze (deps, head, tail)
1226 struct deps *deps;
1227 rtx head, tail;
1229 register rtx insn;
1230 register rtx u;
1231 rtx loop_notes = 0;
1233 for (insn = head;; insn = NEXT_INSN (insn))
1235 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1237 /* Clear out the stale LOG_LINKS from flow. */
1238 free_INSN_LIST_list (&LOG_LINKS (insn));
1240 /* Clear out stale SCHED_GROUP_P. */
1241 SCHED_GROUP_P (insn) = 0;
1243 /* Make each JUMP_INSN a scheduling barrier for memory
1244 references. */
1245 if (GET_CODE (insn) == JUMP_INSN)
1247 /* Keep the list a reasonable size. */
1248 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1249 flush_pending_lists (deps, insn, 0);
1250 else
1251 deps->last_pending_memory_flush
1252 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1254 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1255 loop_notes = 0;
1257 else if (GET_CODE (insn) == CALL_INSN)
1259 rtx x;
1260 register int i;
1262 /* Clear out stale SCHED_GROUP_P. */
1263 SCHED_GROUP_P (insn) = 0;
1265 CANT_MOVE (insn) = 1;
1267 /* Clear out the stale LOG_LINKS from flow. */
1268 free_INSN_LIST_list (&LOG_LINKS (insn));
1270 /* Any instruction using a hard register which may get clobbered
1271 by a call needs to be marked as dependent on this call.
1272 This prevents a use of a hard return reg from being moved
1273 past a void call (i.e. it does not explicitly set the hard
1274 return reg). */
1276 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1277 all registers, not just hard registers, may be clobbered by this
1278 call. */
1280 /* Insn, being a CALL_INSN, magically depends on
1281 `last_function_call' already. */
1283 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1284 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1286 for (i = 0; i < deps->max_reg; i++)
1288 struct deps_reg *reg_last = &deps->reg_last[i];
1290 for (u = reg_last->uses; u; u = XEXP (u, 1))
1291 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1292 for (u = reg_last->sets; u; u = XEXP (u, 1))
1293 add_dependence (insn, XEXP (u, 0), 0);
1294 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1295 add_dependence (insn, XEXP (u, 0), 0);
1297 free_INSN_LIST_list (&reg_last->uses);
1299 reg_pending_sets_all = 1;
1301 /* Add a pair of REG_SAVE_NOTEs which we will later
1302 convert back into a NOTE_INSN_SETJMP note. See
1303 reemit_notes for why we use a pair of NOTEs. */
1304 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1305 GEN_INT (0),
1306 REG_NOTES (insn));
1307 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1308 GEN_INT (NOTE_INSN_SETJMP),
1309 REG_NOTES (insn));
1311 else
1313 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1314 if (call_used_regs[i] || global_regs[i])
1316 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1317 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1318 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1319 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1321 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1325 /* For each insn which shouldn't cross a call, add a dependence
1326 between that insn and this call insn. */
1327 x = LOG_LINKS (deps->sched_before_next_call);
1328 while (x)
1330 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1331 x = XEXP (x, 1);
1333 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1335 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1336 loop_notes = 0;
1338 /* In the absence of interprocedural alias analysis, we must flush
1339 all pending reads and writes, and start new dependencies starting
1340 from here. But only flush writes for constant calls (which may
1341 be passed a pointer to something we haven't written yet). */
1342 flush_pending_lists (deps, insn, CONST_CALL_P (insn));
1344 /* Depend this function call (actually, the user of this
1345 function call) on all hard register clobberage. */
1347 /* last_function_call is now a list of insns. */
1348 free_INSN_LIST_list (&deps->last_function_call);
1349 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1351 /* Before reload, begin a post-call group, so as to keep the
1352 lifetimes of hard registers correct. */
1353 if (! reload_completed)
1354 deps->in_post_call_group_p = 1;
1357 /* See comments on reemit_notes as to why we do this.
1358 ??? Actually, the reemit_notes just say what is done, not why. */
1360 else if (GET_CODE (insn) == NOTE
1361 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1362 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1364 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1365 loop_notes);
1366 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1367 GEN_INT (NOTE_LINE_NUMBER (insn)),
1368 loop_notes);
1370 else if (GET_CODE (insn) == NOTE
1371 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1372 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1373 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1374 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1375 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1376 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1378 rtx rtx_region;
1380 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1381 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1382 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1383 else
1384 rtx_region = GEN_INT (0);
1386 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1387 rtx_region,
1388 loop_notes);
1389 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1390 GEN_INT (NOTE_LINE_NUMBER (insn)),
1391 loop_notes);
1392 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1395 if (insn == tail)
1396 return;
1398 abort ();
1401 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1402 dependences from LOG_LINKS to build forward dependences in
1403 INSN_DEPEND. */
1405 void
1406 compute_forward_dependences (head, tail)
1407 rtx head, tail;
1409 rtx insn, link;
1410 rtx next_tail;
1411 enum reg_note dep_type;
1413 next_tail = NEXT_INSN (tail);
1414 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1416 if (! INSN_P (insn))
1417 continue;
1419 insn = group_leader (insn);
1421 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1423 rtx x = group_leader (XEXP (link, 0));
1424 rtx new_link;
1426 if (x != XEXP (link, 0))
1427 continue;
1429 #ifdef ENABLE_CHECKING
1430 /* If add_dependence is working properly there should never
1431 be notes, deleted insns or duplicates in the backward
1432 links. Thus we need not check for them here.
1434 However, if we have enabled checking we might as well go
1435 ahead and verify that add_dependence worked properly. */
1436 if (GET_CODE (x) == NOTE
1437 || INSN_DELETED_P (x)
1438 || (forward_dependency_cache != NULL
1439 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1440 INSN_LUID (insn)))
1441 || (forward_dependency_cache == NULL
1442 && find_insn_list (insn, INSN_DEPEND (x))))
1443 abort ();
1444 if (forward_dependency_cache != NULL)
1445 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1446 INSN_LUID (insn));
1447 #endif
1449 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1451 dep_type = REG_NOTE_KIND (link);
1452 PUT_REG_NOTE_KIND (new_link, dep_type);
1454 INSN_DEPEND (x) = new_link;
1455 INSN_DEP_COUNT (insn) += 1;
1460 /* Initialize variables in DEPS for region data dependence
1461 analysis. */
1463 void
1464 init_deps (deps)
1465 struct deps *deps;
1467 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1469 deps->max_reg = max_reg;
1470 deps->reg_last = (struct deps_reg *)
1471 xcalloc (max_reg, sizeof (struct deps_reg));
1472 INIT_REG_SET (&deps->reg_last_in_use);
1474 deps->pending_read_insns = 0;
1475 deps->pending_read_mems = 0;
1476 deps->pending_write_insns = 0;
1477 deps->pending_write_mems = 0;
1478 deps->pending_lists_length = 0;
1479 deps->pending_flush_length = 0;
1480 deps->last_pending_memory_flush = 0;
1481 deps->last_function_call = 0;
1482 deps->in_post_call_group_p = 0;
1484 deps->sched_before_next_call
1485 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1486 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1487 LOG_LINKS (deps->sched_before_next_call) = 0;
1490 /* Free insn lists found in DEPS. */
1492 void
1493 free_deps (deps)
1494 struct deps *deps;
1496 int i;
1498 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1499 times. For a test case with 42000 regs and 8000 small basic blocks,
1500 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1501 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1503 struct deps_reg *reg_last = &deps->reg_last[i];
1504 free_INSN_LIST_list (&reg_last->uses);
1505 free_INSN_LIST_list (&reg_last->sets);
1506 free_INSN_LIST_list (&reg_last->clobbers);
1508 CLEAR_REG_SET (&deps->reg_last_in_use);
1510 free (deps->reg_last);
1511 deps->reg_last = NULL;
1514 /* If it is profitable to use them, initialize caches for tracking
1515 dependency information. LUID is the number of insns to be scheduled;
1516 it is used in the estimate of profitability. */
1518 void
1519 init_dependency_caches (luid)
1520 int luid;
1522 /* ?!? We could save some memory by computing a per-region luid mapping
1523 which could reduce both the number of vectors in the cache and the size
1524 of each vector. Instead we just avoid the cache entirely unless the
1525 average number of instructions in a basic block is very high. See
1526 the comment before the declaration of true_dependency_cache for
1527 what we consider "very high". */
1528 if (luid / n_basic_blocks > 100 * 5)
1530 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1531 sbitmap_vector_zero (true_dependency_cache, luid);
1532 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1533 sbitmap_vector_zero (anti_dependency_cache, luid);
1534 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1535 sbitmap_vector_zero (output_dependency_cache, luid);
1536 #ifdef ENABLE_CHECKING
1537 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1538 sbitmap_vector_zero (forward_dependency_cache, luid);
1539 #endif
1543 /* Free the caches allocated in init_dependency_caches. */
1545 void
1546 free_dependency_caches ()
1548 if (true_dependency_cache)
1550 free (true_dependency_cache);
1551 true_dependency_cache = NULL;
1552 free (anti_dependency_cache);
1553 anti_dependency_cache = NULL;
1554 free (output_dependency_cache);
1555 output_dependency_cache = NULL;
1556 #ifdef ENABLE_CHECKING
1557 free (forward_dependency_cache);
1558 forward_dependency_cache = NULL;
1559 #endif
1563 /* Initialize some global variables needed by the dependency analysis
1564 code. */
1566 void
1567 init_deps_global ()
1569 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1570 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1571 reg_pending_sets_all = 0;
1574 /* Free everything used by the dependency analysis code. */
1576 void
1577 finish_deps_global ()
1579 FREE_REG_SET (reg_pending_sets);
1580 FREE_REG_SET (reg_pending_clobbers);