Avoid unnecessary dependencies on COND_EXEC insns.
[official-gcc.git] / gcc / sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GNU CC.
10 GNU CC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 2, or (at your option) any
13 later version.
15 GNU CC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
39 #include "recog.h"
40 #include "sched-int.h"
42 extern char *reg_known_equiv_p;
43 extern rtx *reg_known_value;
45 static regset_head reg_pending_sets_head;
46 static regset_head reg_pending_clobbers_head;
48 static regset reg_pending_sets;
49 static regset reg_pending_clobbers;
50 static int reg_pending_sets_all;
52 /* To speed up the test for duplicate dependency links we keep a
53 record of dependencies created by add_dependence when the average
54 number of instructions in a basic block is very large.
56 Studies have shown that there are typically around 5 instructions between
57 branches in typical C code. So we can make a guess that the average
58 basic block is approximately 5 instructions long; we will choose 100X
59 the average size as a very large basic block.
61 Each insn has associated bitmaps for its dependencies. Each bitmap
62 has enough entries to represent a dependency on any other insn in
63 the insn chain. If the bitmap for the true dependency cache is
64 allocated, then the other two caches are allocated as well. */
65 static sbitmap *true_dependency_cache;
66 static sbitmap *anti_dependency_cache;
67 static sbitmap *output_dependency_cache;
69 /* To speed up checking the consistency of the forward insn
70 dependencies we build, we use the following cache. An alternative
71 would be to switch off the check for duplicate insns in forward
72 dependencies. */
73 #ifdef ENABLE_CHECKING
74 static sbitmap *forward_dependency_cache;
75 #endif
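
/* As a purely illustrative sketch (not part of the pass itself), a lookup
   in one of these caches is indexed by the LUIDs of the two insns involved,
   along the lines of:

       if (true_dependency_cache != NULL
           && TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem)))
         ;  /* A true dependence of INSN on ELEM is already recorded.  */

   add_dependence below uses exactly this kind of test to avoid walking
   LOG_LINKS when a dependence between the two insns is already known.  */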
77 static void remove_dependence PARAMS ((rtx, rtx));
78 static void set_sched_group_p PARAMS ((rtx));
80 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
81 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
82 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
83 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
84 static rtx group_leader PARAMS ((rtx));
86 static rtx get_condition PARAMS ((rtx));
87 static int conditions_mutex_p PARAMS ((rtx, rtx));
89 /* Return the INSN_LIST containing INSN in LIST, or NULL
90 if LIST does not contain INSN. */
92 HAIFA_INLINE rtx
93 find_insn_list (insn, list)
94 rtx insn;
95 rtx list;
97 while (list)
99 if (XEXP (list, 0) == insn)
100 return list;
101 list = XEXP (list, 1);
103 return 0;
106 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
107 otherwise. */
109 HAIFA_INLINE int
110 find_insn_mem_list (insn, x, list, list1)
111 rtx insn, x;
112 rtx list, list1;
114 while (list)
116 if (XEXP (list, 0) == insn
117 && XEXP (list1, 0) == x)
118 return 1;
119 list = XEXP (list, 1);
120 list1 = XEXP (list1, 1);
122 return 0;
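
/* Illustrative usage sketch (the variables INSN and MEM here are
   hypothetical): the pending memory lists are kept as two parallel
   INSN_LIST/EXPR_LIST chains, so a pair recorded with

       add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                &deps->pending_read_mems, insn, mem);

   can later be found again with

       find_insn_mem_list (insn, mem, deps->pending_read_insns,
                           deps->pending_read_mems);

   add_insn_mem_dependence is defined later in this file.  */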
125 /* Find the condition under which INSN is executed. */
127 static rtx
128 get_condition (insn)
129 rtx insn;
131 rtx pat = PATTERN (insn);
132 rtx cond;
134 if (pat == 0)
135 return 0;
136 if (GET_CODE (pat) == COND_EXEC)
137 return COND_EXEC_TEST (pat);
138 if (GET_CODE (insn) != JUMP_INSN)
139 return 0;
140 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
141 return 0;
142 if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
143 return 0;
144 pat = SET_SRC (pat);
145 cond = XEXP (pat, 0);
146 if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
147 && XEXP (pat, 2) == pc_rtx)
148 return cond;
149 else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
150 && XEXP (pat, 1) == pc_rtx)
151 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
152 XEXP (cond, 0), XEXP (cond, 1));
153 else
154 return 0;
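
/* For example (illustrative RTL only; register numbers and modes are made
   up), a conditional branch whose pattern is

       (set (pc) (if_then_else (eq (reg:CC 100) (const_int 0))
                               (label_ref 42)
                               (pc)))

   yields the condition (eq (reg:CC 100) (const_int 0)); a branch with the
   two arms swapped yields the reversed comparison instead.  A COND_EXEC
   pattern simply yields its COND_EXEC_TEST.  */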
157 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
159 static int
160 conditions_mutex_p (cond1, cond2)
161 rtx cond1, cond2;
163 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
164 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
165 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
166 && XEXP (cond1, 0) == XEXP (cond2, 0)
167 && XEXP (cond1, 1) == XEXP (cond2, 1))
168 return 1;
169 return 0;
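
/* Illustrative example (hypothetical RTL): the two conditionally executed
   stores

       (cond_exec (eq (reg:CC 100) (const_int 0)) (set (reg:SI 1) (reg:SI 2)))
       (cond_exec (ne (reg:CC 100) (const_int 0)) (set (reg:SI 1) (reg:SI 3)))

   are guarded by mutually exclusive conditions, so add_dependence below may
   omit the output dependence between the two writes to (reg:SI 1).  That is
   the point of the test above: avoid unnecessary dependencies between
   COND_EXEC insns that can never execute together.  */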
172 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
173 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
174 of dependence that this link represents. */
176 void
177 add_dependence (insn, elem, dep_type)
178 rtx insn;
179 rtx elem;
180 enum reg_note dep_type;
182 rtx link, next;
183 int present_p;
184 enum reg_note present_dep_type;
185 rtx cond1, cond2;
187 /* Don't depend an insn on itself. */
188 if (insn == elem)
189 return;
191 /* We can get a dependency on deleted insns due to optimizations in
192 the register allocation and reloading or due to splitting. Any
193 such dependency is useless and can be ignored. */
194 if (GET_CODE (elem) == NOTE)
195 return;
197 /* flow.c doesn't handle conditional lifetimes entirely correctly;
198 calls mess up the conditional lifetimes. */
199 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
201 cond1 = get_condition (insn);
202 cond2 = get_condition (elem);
203 if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
204 return;
207 /* If elem is part of a sequence that must be scheduled together, then
208 make the dependence point to the last insn of the sequence.
209 When HAVE_cc0, it is possible for NOTEs to exist between users and
210 setters of the condition codes, so we must skip past notes here.
211 Otherwise, NOTEs are impossible here. */
212 next = next_nonnote_insn (elem);
213 if (next && SCHED_GROUP_P (next)
214 && GET_CODE (next) != CODE_LABEL)
216 /* Notes can in fact intervene here, which is why we use
217 next_nonnote_insn when walking forward below. */
219 /* We must reject CODE_LABELs, so that we don't get confused by one
220 that has LABEL_PRESERVE_P set, which is represented by the same
221 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
222 SCHED_GROUP_P. */
224 rtx nnext;
225 while ((nnext = next_nonnote_insn (next)) != NULL
226 && SCHED_GROUP_P (nnext)
227 && GET_CODE (nnext) != CODE_LABEL)
228 next = nnext;
230 /* Again, don't depend an insn on itself. */
231 if (insn == next)
232 return;
234 /* Make the dependence to NEXT, the last insn of the group, instead
235 of the original ELEM. */
236 elem = next;
239 present_p = 1;
240 #ifdef INSN_SCHEDULING
241 /* ??? No good way to tell from here whether we're doing interblock
242 scheduling. Possibly add another callback. */
243 #if 0
244 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
245 No need for interblock dependences with calls, since
246 calls are not moved between blocks. Note: the edge where
247 elem is a CALL is still required. */
248 if (GET_CODE (insn) == CALL_INSN
249 && (INSN_BB (elem) != INSN_BB (insn)))
250 return;
251 #endif
253 /* If we already have a dependency for ELEM, then we do not need to
254 do anything. Avoiding the list walk below can cut compile times
255 dramatically for some code. */
256 if (true_dependency_cache != NULL)
258 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
259 abort ();
260 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
261 present_dep_type = 0;
262 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
263 INSN_LUID (elem)))
264 present_dep_type = REG_DEP_ANTI;
265 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
266 INSN_LUID (elem)))
267 present_dep_type = REG_DEP_OUTPUT;
268 else
269 present_p = 0;
270 if (present_p && (int) dep_type >= (int) present_dep_type)
271 return;
273 #endif
275 /* Check that we don't already have this dependence. */
276 if (present_p)
277 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
278 if (XEXP (link, 0) == elem)
280 #ifdef INSN_SCHEDULING
281 /* Clear corresponding cache entry because type of the link
282 may be changed. */
283 if (true_dependency_cache != NULL)
285 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
286 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
287 INSN_LUID (elem));
288 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
289 && output_dependency_cache)
290 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
291 INSN_LUID (elem));
292 else
293 abort ();
295 #endif
297 /* If this is a more restrictive type of dependence than the existing
298 one, then change the existing dependence to this type. */
299 if ((int) dep_type < (int) REG_NOTE_KIND (link))
300 PUT_REG_NOTE_KIND (link, dep_type);
302 #ifdef INSN_SCHEDULING
303 /* If we are adding a dependency to INSN's LOG_LINKs, then
304 note that in the bitmap caches of dependency information. */
305 if (true_dependency_cache != NULL)
307 if ((int)REG_NOTE_KIND (link) == 0)
308 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
309 INSN_LUID (elem));
310 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
311 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
312 INSN_LUID (elem));
313 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
314 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
315 INSN_LUID (elem));
317 #endif
318 return;
320 /* Might want to check one level of transitivity to save conses. */
322 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
323 LOG_LINKS (insn) = link;
325 /* Insn dependency, not data dependency. */
326 PUT_REG_NOTE_KIND (link, dep_type);
328 #ifdef INSN_SCHEDULING
329 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
330 in the bitmap caches of dependency information. */
331 if (true_dependency_cache != NULL)
333 if ((int)dep_type == 0)
334 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
335 else if (dep_type == REG_DEP_ANTI)
336 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
337 else if (dep_type == REG_DEP_OUTPUT)
338 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
340 #endif
343 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
344 of INSN. Abort if not found. */
346 static void
347 remove_dependence (insn, elem)
348 rtx insn;
349 rtx elem;
351 rtx prev, link, next;
352 int found = 0;
354 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
356 next = XEXP (link, 1);
357 if (XEXP (link, 0) == elem)
359 if (prev)
360 XEXP (prev, 1) = next;
361 else
362 LOG_LINKS (insn) = next;
364 #ifdef INSN_SCHEDULING
365 /* If we are removing a dependency from the LOG_LINKS list,
366 make sure to remove it from the cache too. */
367 if (true_dependency_cache != NULL)
369 if (REG_NOTE_KIND (link) == 0)
370 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
371 INSN_LUID (elem));
372 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
373 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
374 INSN_LUID (elem));
375 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
376 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
377 INSN_LUID (elem));
379 #endif
381 free_INSN_LIST_node (link);
383 found = 1;
385 else
386 prev = link;
389 if (!found)
390 abort ();
391 return;
394 /* Return an insn which represents a SCHED_GROUP, which is
395 the last insn in the group. */
397 static rtx
398 group_leader (insn)
399 rtx insn;
401 rtx prev;
403 do
404 {
405 prev = insn;
406 insn = next_nonnote_insn (insn);
407 }
408 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
410 return prev;
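
/* As an illustration: on a HAVE_cc0 target, the insn that sets cc0 and the
   conditional jump that uses it are tied together by SCHED_GROUP_P (see
   set_sched_group_p below), and calling group_leader on the cc0 setter
   returns the jump, i.e. the last insn of that group.  This is a sketch of
   the intended behaviour, not a statement about any particular port.  */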
413 /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
414 goes along with that. */
416 static void
417 set_sched_group_p (insn)
418 rtx insn;
420 rtx link, prev;
422 SCHED_GROUP_P (insn) = 1;
424 /* There may be a note before this insn now, but all notes will
425 be removed before we actually try to schedule the insns, so
426 it won't cause a problem later. We must avoid it here though. */
427 prev = prev_nonnote_insn (insn);
429 /* Make a copy of all dependencies on the immediately previous insn,
430 and add to this insn. This is so that all the dependencies will
431 apply to the group. Remove an explicit dependence on this insn
432 as SCHED_GROUP_P now represents it. */
434 if (find_insn_list (prev, LOG_LINKS (insn)))
435 remove_dependence (insn, prev);
437 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
438 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
441 /* Process an insn's memory dependencies. There are four kinds of
442 dependencies:
444 (0) read dependence: read follows read
445 (1) true dependence: read follows write
446 (2) anti dependence: write follows read
447 (3) output dependence: write follows write
449 We are careful to build only dependencies which actually exist, and
450 use transitivity to avoid building too many links. */
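
/* A small concrete illustration (pseudo-code, not from any target):

       insn A:  r1 = MEM[p]      -- read
       insn B:  MEM[p] = r2      -- write
       insn C:  MEM[p] = r3      -- write

   B following A is an anti dependence (2), C following B is an output
   dependence (3), and a later read of MEM[p] after C would be a true
   dependence (1).  A read following a read (0) normally needs no link
   at all.  */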
452 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
453 The MEM is a memory reference contained within INSN, which we are saving
454 so that we can do memory aliasing on it. */
456 void
457 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
458 struct deps *deps;
459 rtx *insn_list, *mem_list, insn, mem;
461 register rtx link;
463 link = alloc_INSN_LIST (insn, *insn_list);
464 *insn_list = link;
466 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
467 *mem_list = link;
469 deps->pending_lists_length++;
472 /* Make a dependency between every memory reference on the pending lists
473 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
474 the read list. */
476 static void
477 flush_pending_lists (deps, insn, only_write)
478 struct deps *deps;
479 rtx insn;
480 int only_write;
482 rtx u;
483 rtx link;
485 while (deps->pending_read_insns && ! only_write)
487 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
488 REG_DEP_ANTI);
490 link = deps->pending_read_insns;
491 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
492 free_INSN_LIST_node (link);
494 link = deps->pending_read_mems;
495 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
496 free_EXPR_LIST_node (link);
498 while (deps->pending_write_insns)
500 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
501 REG_DEP_ANTI);
503 link = deps->pending_write_insns;
504 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
505 free_INSN_LIST_node (link);
507 link = deps->pending_write_mems;
508 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
509 free_EXPR_LIST_node (link);
511 deps->pending_lists_length = 0;
513 /* last_pending_memory_flush is now a list of insns. */
514 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
515 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
517 free_INSN_LIST_list (&deps->last_pending_memory_flush);
518 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
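
/* Net effect, informally: every insn still on the pending read/write lists
   becomes a predecessor of INSN via an anti dependence, the lists are
   emptied, and INSN becomes the sole element of last_pending_memory_flush,
   so subsequent memory references will in turn depend on INSN.  */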
521 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
522 rtx, X, creating all dependencies generated by the write to the
523 destination of X, and reads of everything mentioned. */
525 static void
526 sched_analyze_1 (deps, x, insn)
527 struct deps *deps;
528 rtx x;
529 rtx insn;
531 register int regno;
532 register rtx dest = XEXP (x, 0);
533 enum rtx_code code = GET_CODE (x);
535 if (dest == 0)
536 return;
538 if (GET_CODE (dest) == PARALLEL
539 && GET_MODE (dest) == BLKmode)
541 register int i;
542 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
543 sched_analyze_1 (deps, XVECEXP (dest, 0, i), insn);
544 if (GET_CODE (x) == SET)
545 sched_analyze_2 (deps, SET_SRC (x), insn);
546 return;
549 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
550 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
552 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
554 /* The second and third arguments are values read by this insn. */
555 sched_analyze_2 (deps, XEXP (dest, 1), insn);
556 sched_analyze_2 (deps, XEXP (dest, 2), insn);
558 dest = XEXP (dest, 0);
561 if (GET_CODE (dest) == REG)
563 register int i;
565 regno = REGNO (dest);
567 /* A hard reg in a wide mode may really be multiple registers.
568 If so, mark all of them just like the first. */
569 if (regno < FIRST_PSEUDO_REGISTER)
571 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
572 while (--i >= 0)
574 int r = regno + i;
575 rtx u;
577 for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
578 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
580 for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
581 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
583 /* Clobbers need not be ordered with respect to one
584 another, but sets must be ordered with respect to a
585 pending clobber. */
586 if (code == SET)
588 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
589 free_INSN_LIST_list (&deps->reg_last_uses[r]);
590 for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
591 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
592 SET_REGNO_REG_SET (reg_pending_sets, r);
594 else
595 SET_REGNO_REG_SET (reg_pending_clobbers, r);
597 /* Function calls clobber all call_used regs. */
598 if (global_regs[r] || (code == SET && call_used_regs[r]))
599 for (u = deps->last_function_call; u; u = XEXP (u, 1))
600 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
603 else
605 rtx u;
607 for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
608 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
610 for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
611 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
613 if (code == SET)
615 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
616 free_INSN_LIST_list (&deps->reg_last_uses[regno]);
617 for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
618 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
619 SET_REGNO_REG_SET (reg_pending_sets, regno);
621 else
622 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
624 /* Pseudos that are REG_EQUIV to something may be replaced
625 by that during reloading. We need only add dependencies for
626 the address in the REG_EQUIV note. */
627 if (!reload_completed
628 && reg_known_equiv_p[regno]
629 && GET_CODE (reg_known_value[regno]) == MEM)
630 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
632 /* Don't let it cross a call after scheduling if it doesn't
633 already cross one. */
635 if (REG_N_CALLS_CROSSED (regno) == 0)
636 for (u = deps->last_function_call; u; u = XEXP (u, 1))
637 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
640 else if (GET_CODE (dest) == MEM)
642 /* Writing memory. */
644 if (deps->pending_lists_length > 32)
646 /* Flush all pending reads and writes to prevent the pending lists
647 from getting any larger. Insn scheduling runs too slowly when
648 these lists get long. The number 32 was chosen because it
649 seems like a reasonable number. When compiling GCC with itself,
650 this flush occurs 8 times for sparc, and 10 times for m88k using
651 the number 32. */
652 flush_pending_lists (deps, insn, 0);
654 else
656 rtx u;
657 rtx pending, pending_mem;
659 pending = deps->pending_read_insns;
660 pending_mem = deps->pending_read_mems;
661 while (pending)
663 if (anti_dependence (XEXP (pending_mem, 0), dest))
664 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
666 pending = XEXP (pending, 1);
667 pending_mem = XEXP (pending_mem, 1);
670 pending = deps->pending_write_insns;
671 pending_mem = deps->pending_write_mems;
672 while (pending)
674 if (output_dependence (XEXP (pending_mem, 0), dest))
675 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
677 pending = XEXP (pending, 1);
678 pending_mem = XEXP (pending_mem, 1);
681 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
682 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
684 add_insn_mem_dependence (deps, &deps->pending_write_insns,
685 &deps->pending_write_mems, insn, dest);
687 sched_analyze_2 (deps, XEXP (dest, 0), insn);
690 /* Analyze reads. */
691 if (GET_CODE (x) == SET)
692 sched_analyze_2 (deps, SET_SRC (x), insn);
695 /* Analyze the uses of memory and registers in rtx X in INSN. */
697 static void
698 sched_analyze_2 (deps, x, insn)
699 struct deps *deps;
700 rtx x;
701 rtx insn;
703 register int i;
704 register int j;
705 register enum rtx_code code;
706 register const char *fmt;
708 if (x == 0)
709 return;
711 code = GET_CODE (x);
713 switch (code)
715 case CONST_INT:
716 case CONST_DOUBLE:
717 case SYMBOL_REF:
718 case CONST:
719 case LABEL_REF:
720 /* Ignore constants. Note that we must handle CONST_DOUBLE here
721 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
722 this does not mean that this insn is using cc0. */
723 return;
725 #ifdef HAVE_cc0
726 case CC0:
727 /* User of CC0 depends on immediately preceding insn. */
728 set_sched_group_p (insn);
729 return;
730 #endif
732 case REG:
734 rtx u;
735 int regno = REGNO (x);
736 if (regno < FIRST_PSEUDO_REGISTER)
738 int i;
740 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
741 while (--i >= 0)
743 int r = regno + i;
744 deps->reg_last_uses[r]
745 = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);
747 for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
748 add_dependence (insn, XEXP (u, 0), 0);
750 /* ??? This should never happen. */
751 for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
752 add_dependence (insn, XEXP (u, 0), 0);
754 if (call_used_regs[r] || global_regs[r])
755 /* Function calls clobber all call_used regs. */
756 for (u = deps->last_function_call; u; u = XEXP (u, 1))
757 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
760 else
762 deps->reg_last_uses[regno]
763 = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);
765 for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
766 add_dependence (insn, XEXP (u, 0), 0);
768 /* ??? This should never happen. */
769 for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
770 add_dependence (insn, XEXP (u, 0), 0);
772 /* Pseudos that are REG_EQUIV to something may be replaced
773 by that during reloading. We need only add dependencies for
774 the address in the REG_EQUIV note. */
775 if (!reload_completed
776 && reg_known_equiv_p[regno]
777 && GET_CODE (reg_known_value[regno]) == MEM)
778 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
780 /* If the register does not already cross any calls, then add this
781 insn to the sched_before_next_call list so that it will still
782 not cross calls after scheduling. */
783 if (REG_N_CALLS_CROSSED (regno) == 0)
784 add_dependence (deps->sched_before_next_call, insn,
785 REG_DEP_ANTI);
787 return;
790 case MEM:
792 /* Reading memory. */
793 rtx u;
794 rtx pending, pending_mem;
796 pending = deps->pending_read_insns;
797 pending_mem = deps->pending_read_mems;
798 while (pending)
800 if (read_dependence (XEXP (pending_mem, 0), x))
801 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
803 pending = XEXP (pending, 1);
804 pending_mem = XEXP (pending_mem, 1);
807 pending = deps->pending_write_insns;
808 pending_mem = deps->pending_write_mems;
809 while (pending)
811 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
812 x, rtx_varies_p))
813 add_dependence (insn, XEXP (pending, 0), 0);
815 pending = XEXP (pending, 1);
816 pending_mem = XEXP (pending_mem, 1);
819 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
820 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
822 /* Always add these dependencies to pending_reads, since
823 this insn may be followed by a write. */
824 add_insn_mem_dependence (deps, &deps->pending_read_insns,
825 &deps->pending_read_mems, insn, x);
827 /* Take advantage of tail recursion here. */
828 sched_analyze_2 (deps, XEXP (x, 0), insn);
829 return;
832 /* Force pending stores to memory in case a trap handler needs them. */
833 case TRAP_IF:
834 flush_pending_lists (deps, insn, 1);
835 break;
837 case ASM_OPERANDS:
838 case ASM_INPUT:
839 case UNSPEC_VOLATILE:
841 rtx u;
843 /* Traditional and volatile asm instructions must be considered to use
844 and clobber all hard registers, all pseudo-registers and all of
845 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
847 Consider for instance a volatile asm that changes the fpu rounding
848 mode. An insn should not be moved across this even if it only uses
849 pseudo-regs because it might give an incorrectly rounded result. */
850 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
852 int max_reg = max_reg_num ();
853 for (i = 0; i < max_reg; i++)
855 for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
856 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
857 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
858 free_INSN_LIST_list (&deps->reg_last_uses[i]);
860 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
861 add_dependence (insn, XEXP (u, 0), 0);
863 for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
864 add_dependence (insn, XEXP (u, 0), 0);
866 reg_pending_sets_all = 1;
868 flush_pending_lists (deps, insn, 0);
871 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
872 We cannot just fall through here since then we would be confused
873 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
874 a traditional asm, unlike its normal usage. */
876 if (code == ASM_OPERANDS)
878 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
879 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
880 return;
882 break;
885 case PRE_DEC:
886 case POST_DEC:
887 case PRE_INC:
888 case POST_INC:
889 /* These both read and modify the result. We must handle them as writes
890 to get proper dependencies for following instructions. We must handle
891 them as reads to get proper dependencies from this to previous
892 instructions. Thus we need to pass them to both sched_analyze_1
893 and sched_analyze_2. We must call sched_analyze_2 first in order
894 to get the proper antecedent for the read. */
895 sched_analyze_2 (deps, XEXP (x, 0), insn);
896 sched_analyze_1 (deps, x, insn);
897 return;
899 case POST_MODIFY:
900 case PRE_MODIFY:
901 /* op0 = op0 + op1 */
902 sched_analyze_2 (deps, XEXP (x, 0), insn);
903 sched_analyze_2 (deps, XEXP (x, 1), insn);
904 sched_analyze_1 (deps, x, insn);
905 return;
907 default:
908 break;
911 /* Other cases: walk the insn. */
912 fmt = GET_RTX_FORMAT (code);
913 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
915 if (fmt[i] == 'e')
916 sched_analyze_2 (deps, XEXP (x, i), insn);
917 else if (fmt[i] == 'E')
918 for (j = 0; j < XVECLEN (x, i); j++)
919 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
923 /* Analyze an INSN with pattern X to find all dependencies. */
925 static void
926 sched_analyze_insn (deps, x, insn, loop_notes)
927 struct deps *deps;
928 rtx x, insn;
929 rtx loop_notes;
931 register RTX_CODE code = GET_CODE (x);
932 rtx link;
933 int maxreg = max_reg_num ();
934 int i;
936 if (code == COND_EXEC)
938 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
940 /* ??? Should be recording conditions so we reduce the number of
941 false dependencies. */
942 x = COND_EXEC_CODE (x);
943 code = GET_CODE (x);
945 if (code == SET || code == CLOBBER)
946 sched_analyze_1 (deps, x, insn);
947 else if (code == PARALLEL)
949 register int i;
950 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
952 rtx sub = XVECEXP (x, 0, i);
953 code = GET_CODE (sub);
955 if (code == COND_EXEC)
957 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
958 sub = COND_EXEC_CODE (sub);
959 code = GET_CODE (sub);
961 if (code == SET || code == CLOBBER)
962 sched_analyze_1 (deps, sub, insn);
963 else
964 sched_analyze_2 (deps, sub, insn);
967 else
968 sched_analyze_2 (deps, x, insn);
970 /* Mark registers CLOBBERED or used by called function. */
971 if (GET_CODE (insn) == CALL_INSN)
972 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
974 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
975 sched_analyze_1 (deps, XEXP (link, 0), insn);
976 else
977 sched_analyze_2 (deps, XEXP (link, 0), insn);
980 if (GET_CODE (insn) == JUMP_INSN)
982 rtx next, u, pending, pending_mem;
983 next = next_nonnote_insn (insn);
984 if (next && GET_CODE (next) == BARRIER)
986 for (i = 0; i < maxreg; i++)
988 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
989 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
990 for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
991 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
992 for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
993 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
996 else
998 regset_head tmp;
999 INIT_REG_SET (&tmp);
1001 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1002 EXECUTE_IF_SET_IN_REG_SET
1003 (&tmp, 0, i,
1005 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
1006 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1007 deps->reg_last_uses[i]
1008 = alloc_INSN_LIST (insn, deps->reg_last_uses[i]);
1011 CLEAR_REG_SET (&tmp);
1013 pending = deps->pending_write_insns;
1014 pending_mem = deps->pending_write_mems;
1015 while (pending)
1017 add_dependence (insn, XEXP (pending, 0), 0);
1019 pending = XEXP (pending, 1);
1020 pending_mem = XEXP (pending_mem, 1);
1023 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1024 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1027 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1028 block, then we must be sure that no instructions are scheduled across it.
1029 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1030 become incorrect. */
1032 if (loop_notes)
1034 int max_reg = max_reg_num ();
1035 int schedule_barrier_found = 0;
1036 rtx link;
1038 /* Update loop_notes with any notes from this insn. Also determine
1039 if any of the notes on the list correspond to instruction scheduling
1040 barriers (loop, eh and setjmp notes, but not range notes). */
1041 link = loop_notes;
1042 while (XEXP (link, 1))
1044 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1045 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1046 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1047 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
1048 || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
1049 schedule_barrier_found = 1;
1051 link = XEXP (link, 1);
1053 XEXP (link, 1) = REG_NOTES (insn);
1054 REG_NOTES (insn) = loop_notes;
1056 /* Add dependencies if a scheduling barrier was found. */
1057 if (schedule_barrier_found)
1059 for (i = 0; i < max_reg; i++)
1061 rtx u;
1062 for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
1063 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1064 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1065 free_INSN_LIST_list (&deps->reg_last_uses[i]);
1067 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
1068 add_dependence (insn, XEXP (u, 0), 0);
1070 for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
1071 add_dependence (insn, XEXP (u, 0), 0);
1073 reg_pending_sets_all = 1;
1075 flush_pending_lists (deps, insn, 0);
1080 /* Accumulate clobbers until the next set so that it will be output dependent
1081 on all of them. At the next set we can clear the clobber list, since
1082 subsequent sets will be output dependent on it. */
1083 EXECUTE_IF_SET_IN_REG_SET
1084 (reg_pending_sets, 0, i,
1086 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1088 free_INSN_LIST_list (&deps->reg_last_sets[i]);
1089 free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
1090 deps->reg_last_sets[i] = 0;
1092 deps->reg_last_sets[i]
1093 = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
1095 EXECUTE_IF_SET_IN_REG_SET
1096 (reg_pending_clobbers, 0, i,
1098 deps->reg_last_clobbers[i]
1099 = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
1101 CLEAR_REG_SET (reg_pending_sets);
1102 CLEAR_REG_SET (reg_pending_clobbers);
1104 if (reg_pending_sets_all)
1106 for (i = 0; i < maxreg; i++)
1108 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1110 free_INSN_LIST_list (&deps->reg_last_sets[i]);
1111 free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
1112 deps->reg_last_sets[i] = 0;
1114 deps->reg_last_sets[i]
1115 = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
1118 reg_pending_sets_all = 0;
1121 /* If a post-call group is still open, see if it should remain so.
1122 This insn must be a simple move of a hard reg to a pseudo or
1123 vice-versa.
1125 We must avoid moving these insns for correctness on
1126 SMALL_REGISTER_CLASS machines, and for special registers like
1127 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1128 hard regs for all targets. */
1130 if (deps->in_post_call_group_p)
1132 rtx tmp, set = single_set (insn);
1133 int src_regno, dest_regno;
1135 if (set == NULL)
1136 goto end_call_group;
1138 tmp = SET_DEST (set);
1139 if (GET_CODE (tmp) == SUBREG)
1140 tmp = SUBREG_REG (tmp);
1141 if (GET_CODE (tmp) == REG)
1142 dest_regno = REGNO (tmp);
1143 else
1144 goto end_call_group;
1146 tmp = SET_SRC (set);
1147 if (GET_CODE (tmp) == SUBREG)
1148 tmp = SUBREG_REG (tmp);
1149 if (GET_CODE (tmp) == REG)
1150 src_regno = REGNO (tmp);
1151 else
1152 goto end_call_group;
1154 if (src_regno < FIRST_PSEUDO_REGISTER
1155 || dest_regno < FIRST_PSEUDO_REGISTER)
1157 set_sched_group_p (insn);
1158 CANT_MOVE (insn) = 1;
1160 else
1162 end_call_group:
1163 deps->in_post_call_group_p = 0;
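
/* Illustration (hypothetical insns and register numbers): after

       call foo
       (set (reg:SI 200) (reg:SI 0))   ; copy a hard return reg to a pseudo

   the copy satisfies the test above, since one side is a hard register, so
   it is added to the call's scheduling group and marked CANT_MOVE; the
   first following insn that is not such a simple copy closes the group.  */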
1168 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1169 for every dependency. */
1171 void
1172 sched_analyze (deps, head, tail)
1173 struct deps *deps;
1174 rtx head, tail;
1176 register rtx insn;
1177 register rtx u;
1178 rtx loop_notes = 0;
1180 for (insn = head;; insn = NEXT_INSN (insn))
1182 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1184 /* Clear out the stale LOG_LINKS from flow. */
1185 free_INSN_LIST_list (&LOG_LINKS (insn));
1187 /* Clear out stale SCHED_GROUP_P. */
1188 SCHED_GROUP_P (insn) = 0;
1190 /* Make each JUMP_INSN a scheduling barrier for memory
1191 references. */
1192 if (GET_CODE (insn) == JUMP_INSN)
1193 deps->last_pending_memory_flush
1194 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1195 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1196 loop_notes = 0;
1198 else if (GET_CODE (insn) == CALL_INSN)
1200 rtx x;
1201 register int i;
1203 /* Clear out stale SCHED_GROUP_P. */
1204 SCHED_GROUP_P (insn) = 0;
1206 CANT_MOVE (insn) = 1;
1208 /* Clear out the stale LOG_LINKS from flow. */
1209 free_INSN_LIST_list (&LOG_LINKS (insn));
1211 /* Any instruction using a hard register which may get clobbered
1212 by a call needs to be marked as dependent on this call.
1213 This prevents a use of a hard return reg from being moved
1214 past a void call (i.e. it does not explicitly set the hard
1215 return reg). */
1217 /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1218 all registers, not just hard registers, may be clobbered by this
1219 call. */
1221 /* Insn, being a CALL_INSN, magically depends on
1222 `last_function_call' already. */
1224 if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1225 && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1227 int max_reg = max_reg_num ();
1228 for (i = 0; i < max_reg; i++)
1230 for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
1231 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1232 free_INSN_LIST_list (&deps->reg_last_uses[i]);
1234 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
1235 add_dependence (insn, XEXP (u, 0), 0);
1237 for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
1238 add_dependence (insn, XEXP (u, 0), 0);
1240 reg_pending_sets_all = 1;
1242 /* Add a pair of REG_SAVE_NOTEs which we will later
1243 convert back into a NOTE_INSN_SETJMP note. See
1244 reemit_notes for why we use a pair of NOTEs. */
1245 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1246 GEN_INT (0),
1247 REG_NOTES (insn));
1248 REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
1249 GEN_INT (NOTE_INSN_SETJMP),
1250 REG_NOTES (insn));
1252 else
1254 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1255 if (call_used_regs[i] || global_regs[i])
1257 for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
1258 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1260 for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
1261 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1263 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1267 /* For each insn which shouldn't cross a call, add a dependence
1268 between that insn and this call insn. */
1269 x = LOG_LINKS (deps->sched_before_next_call);
1270 while (x)
1272 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1273 x = XEXP (x, 1);
1275 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1277 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1278 loop_notes = 0;
1280 /* In the absence of interprocedural alias analysis, we must flush
1281 all pending reads and writes, and start new dependencies starting
1282 from here. But only flush writes for constant calls (which may
1283 be passed a pointer to something we haven't written yet). */
1284 flush_pending_lists (deps, insn, CONST_CALL_P (insn));
1286 /* Depend this function call (actually, the user of this
1287 function call) on all hard register clobberage. */
1289 /* last_function_call is now a list of insns. */
1290 free_INSN_LIST_list (&deps->last_function_call);
1291 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1293 /* Before reload, begin a post-call group, so as to keep the
1294 lifetimes of hard registers correct. */
1295 if (! reload_completed)
1296 deps->in_post_call_group_p = 1;
1299 /* See comments on reemit_notes as to why we do this.
1300 ??? Actually, the reemit_notes just say what is done, not why. */
1302 else if (GET_CODE (insn) == NOTE
1303 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1304 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1306 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1307 loop_notes);
1308 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1309 GEN_INT (NOTE_LINE_NUMBER (insn)),
1310 loop_notes);
1312 else if (GET_CODE (insn) == NOTE
1313 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1314 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1315 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1316 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1317 || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1318 && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1320 rtx rtx_region;
1322 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1323 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1324 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1325 else
1326 rtx_region = GEN_INT (0);
1328 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1329 rtx_region,
1330 loop_notes);
1331 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1332 GEN_INT (NOTE_LINE_NUMBER (insn)),
1333 loop_notes);
1334 CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1337 if (insn == tail)
1338 return;
1340 abort ();
1343 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1344 dependences from LOG_LINKS to build forward dependences in
1345 INSN_DEPEND. */
1347 void
1348 compute_forward_dependences (head, tail)
1349 rtx head, tail;
1351 rtx insn, link;
1352 rtx next_tail;
1353 enum reg_note dep_type;
1355 next_tail = NEXT_INSN (tail);
1356 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1358 if (! INSN_P (insn))
1359 continue;
1361 insn = group_leader (insn);
1363 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1365 rtx x = group_leader (XEXP (link, 0));
1366 rtx new_link;
1368 if (x != XEXP (link, 0))
1369 continue;
1371 #ifdef ENABLE_CHECKING
1372 /* If add_dependence is working properly there should never
1373 be notes, deleted insns or duplicates in the backward
1374 links. Thus we need not check for them here.
1376 However, if we have enabled checking we might as well go
1377 ahead and verify that add_dependence worked properly. */
1378 if (GET_CODE (x) == NOTE
1379 || INSN_DELETED_P (x)
1380 || (forward_dependency_cache != NULL
1381 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1382 INSN_LUID (insn)))
1383 || (forward_dependency_cache == NULL
1384 && find_insn_list (insn, INSN_DEPEND (x))))
1385 abort ();
1386 if (forward_dependency_cache != NULL)
1387 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1388 INSN_LUID (insn));
1389 #endif
1391 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1393 dep_type = REG_NOTE_KIND (link);
1394 PUT_REG_NOTE_KIND (new_link, dep_type);
1396 INSN_DEPEND (x) = new_link;
1397 INSN_DEP_COUNT (insn) += 1;
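
/* In other words (informal sketch): if insn B has insn A in its LOG_LINKS,
   i.e. B depends backward on A, then after this pass A's INSN_DEPEND list
   contains B with the same REG_NOTE kind, and INSN_DEP_COUNT (B) holds the
   number of such backward dependences of B.  */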
1402 /* Initialize DEPS, the variables used for (region) data dependence
1403 analysis. */
1405 void
1406 init_deps (deps)
1407 struct deps *deps;
1409 int maxreg = max_reg_num ();
1410 deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
1411 deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
1412 deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));
1414 deps->pending_read_insns = 0;
1415 deps->pending_read_mems = 0;
1416 deps->pending_write_insns = 0;
1417 deps->pending_write_mems = 0;
1418 deps->pending_lists_length = 0;
1419 deps->last_pending_memory_flush = 0;
1420 deps->last_function_call = 0;
1421 deps->in_post_call_group_p = 0;
1423 deps->sched_before_next_call
1424 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1425 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1426 LOG_LINKS (deps->sched_before_next_call) = 0;
1429 /* Free insn lists found in DEPS. */
1431 void
1432 free_deps (deps)
1433 struct deps *deps;
1435 int max_reg = max_reg_num ();
1436 int i;
1438 /* Note this loop is executed max_reg * nr_regions times. Its first
1439 implementation accounted for over 90% of the calls to free_INSN_LIST_list.
1440 The list was empty for the vast majority of those calls. On the PA, not
1441 calling free_INSN_LIST_list in those cases improves -O2 compile times by
1442 3-5% on average. */
1443 for (i = 0; i < max_reg; ++i)
1445 if (deps->reg_last_clobbers[i])
1446 free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
1447 if (deps->reg_last_sets[i])
1448 free_INSN_LIST_list (&deps->reg_last_sets[i]);
1449 if (deps->reg_last_uses[i])
1450 free_INSN_LIST_list (&deps->reg_last_uses[i]);
1452 free (deps->reg_last_clobbers);
1453 free (deps->reg_last_sets);
1454 free (deps->reg_last_uses);
1457 /* If it is profitable to use them, initialize caches for tracking
1458 dependency information. LUID is the number of insns to be scheduled;
1459 it is used in the estimate of profitability. */
1461 void
1462 init_dependency_caches (luid)
1463 int luid;
1465 /* ?!? We could save some memory by computing a per-region luid mapping
1466 which could reduce both the number of vectors in the cache and the size
1467 of each vector. Instead we just avoid the cache entirely unless the
1468 average number of instructions in a basic block is very high. See
1469 the comment before the declaration of true_dependency_cache for
1470 what we consider "very high". */
1471 if (luid / n_basic_blocks > 100 * 5)
1473 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1474 sbitmap_vector_zero (true_dependency_cache, luid);
1475 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1476 sbitmap_vector_zero (anti_dependency_cache, luid);
1477 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1478 sbitmap_vector_zero (output_dependency_cache, luid);
1479 #ifdef ENABLE_CHECKING
1480 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1481 sbitmap_vector_zero (forward_dependency_cache, luid);
1482 #endif
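
/* For a sense of scale: with the threshold above, a function of 10 basic
   blocks only gets the caches once it contains more than about
   10 * 100 * 5 = 5000 insns (LUIDs), i.e. an average block length over 100
   times the ~5-insn average discussed at the top of the file.  */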
1486 /* Free the caches allocated in init_dependency_caches. */
1488 void
1489 free_dependency_caches ()
1491 if (true_dependency_cache)
1493 free (true_dependency_cache);
1494 true_dependency_cache = NULL;
1495 free (anti_dependency_cache);
1496 anti_dependency_cache = NULL;
1497 free (output_dependency_cache);
1498 output_dependency_cache = NULL;
1499 #ifdef ENABLE_CHECKING
1500 free (forward_dependency_cache);
1501 forward_dependency_cache = NULL;
1502 #endif
1506 /* Initialize some global variables needed by the dependency analysis
1507 code. */
1509 void
1510 init_deps_global ()
1512 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1513 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1514 reg_pending_sets_all = 0;
1517 /* Free everything used by the dependency analysis code. */
1519 void
1520 finish_deps_global ()
1522 FREE_REG_SET (reg_pending_sets);
1523 FREE_REG_SET (reg_pending_clobbers);