contrib/gcc-4.1/gcc/sched-deps.c (dragonfly.git)
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
23 02110-1301, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
39 #include "toplev.h"
40 #include "recog.h"
41 #include "sched-int.h"
42 #include "params.h"
43 #include "cselib.h"
44 #include "df.h"
47 static regset reg_pending_sets;
48 static regset reg_pending_clobbers;
49 static regset reg_pending_uses;
51 /* The following enumeration values tell us what dependencies we
52 should use to implement the barrier. We use true-dependencies for
53 TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
54 enum reg_pending_barrier_mode
56 NOT_A_BARRIER = 0,
57 MOVE_BARRIER,
58 TRUE_BARRIER
61 static enum reg_pending_barrier_mode reg_pending_barrier;
63 /* To speed up the test for duplicate dependency links we keep a
64 record of dependencies created by add_dependence when the average
65 number of instructions in a basic block is very large.
67 Studies have shown that there are typically around 5 instructions between
68 branches in typical C code. So we can make a guess that the average
69 basic block is approximately 5 instructions long; we will choose 100X
70 the average size as a very large basic block.
72 Each insn has associated bitmaps for its dependencies. Each bitmap
73 has enough entries to represent a dependency on any other insn in
74 the insn chain. If the bitmaps for the true dependency cache are
75 allocated, then the other two caches are allocated as well. */
76 static bitmap_head *true_dependency_cache;
77 static bitmap_head *anti_dependency_cache;
78 static bitmap_head *output_dependency_cache;
79 static int cache_size;
81 /* To speed up checking consistency of formed forward insn
82 dependencies we use the following cache. Another possible solution
83 would be to switch off the check for duplicate insns in forward
84 dependencies. */
85 #ifdef ENABLE_CHECKING
86 static bitmap_head *forward_dependency_cache;
87 #endif
89 static int deps_may_trap_p (rtx);
90 static void add_dependence_list (rtx, rtx, int, enum reg_note);
91 static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
92 static void delete_all_dependences (rtx);
93 static void fixup_sched_groups (rtx);
95 static void flush_pending_lists (struct deps *, rtx, int, int);
96 static void sched_analyze_1 (struct deps *, rtx, rtx);
97 static void sched_analyze_2 (struct deps *, rtx, rtx);
98 static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
100 static rtx sched_get_condition (rtx);
101 static int conditions_mutex_p (rtx, rtx);
103 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
105 static int
106 deps_may_trap_p (rtx mem)
108 rtx addr = XEXP (mem, 0);
110 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
112 rtx t = get_reg_known_value (REGNO (addr));
113 if (t)
114 addr = t;
116 return rtx_addr_can_trap_p (addr);
119 /* Return the INSN_LIST containing INSN in LIST, or NULL
120 if LIST does not contain INSN. */
123 find_insn_list (rtx insn, rtx list)
125 while (list)
127 if (XEXP (list, 0) == insn)
128 return list;
129 list = XEXP (list, 1);
131 return 0;
134 /* Find the condition under which INSN is executed. */
136 static rtx
137 sched_get_condition (rtx insn)
139 rtx pat = PATTERN (insn);
140 rtx src;
142 if (pat == 0)
143 return 0;
145 if (GET_CODE (pat) == COND_EXEC)
146 return COND_EXEC_TEST (pat);
148 if (!any_condjump_p (insn) || !onlyjump_p (insn))
149 return 0;
151 src = SET_SRC (pc_set (insn));
153 if (XEXP (src, 2) == pc_rtx)
154 return XEXP (src, 0);
155 else if (XEXP (src, 1) == pc_rtx)
157 rtx cond = XEXP (src, 0);
158 enum rtx_code revcode = reversed_comparison_code (cond, insn);
160 if (revcode == UNKNOWN)
161 return 0;
162 return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
163 XEXP (cond, 1));
166 return 0;
170 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
172 static int
173 conditions_mutex_p (rtx cond1, rtx cond2)
175 if (COMPARISON_P (cond1)
176 && COMPARISON_P (cond2)
177 && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
178 && XEXP (cond1, 0) == XEXP (cond2, 0)
179 && XEXP (cond1, 1) == XEXP (cond2, 1))
180 return 1;
181 return 0;
184 /* Return true if insn1 and insn2 can never depend on one another because
185 the conditions under which they are executed are mutually exclusive. */
186 bool
187 sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
189 rtx cond1, cond2;
191 /* flow.c doesn't handle conditional lifetimes entirely correctly;
192 calls mess up the conditional lifetimes. */
193 if (!CALL_P (insn1) && !CALL_P (insn2))
195 cond1 = sched_get_condition (insn1);
196 cond2 = sched_get_condition (insn2);
197 if (cond1 && cond2
198 && conditions_mutex_p (cond1, cond2)
199 /* Make sure first instruction doesn't affect condition of second
200 instruction if switched. */
201 && !modified_in_p (cond1, insn2)
202 /* Make sure second instruction doesn't affect condition of first
203 instruction if switched. */
204 && !modified_in_p (cond2, insn1))
205 return true;
207 return false;
210 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
211 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the
212 type of dependence that this link represents. The function returns
213 nonzero if a new entry has been added to insn's LOG_LINK. */
216 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
218 rtx link;
219 int present_p;
221 /* Don't depend an insn on itself. */
222 if (insn == elem)
223 return 0;
225 /* We can get a dependency on deleted insns due to optimizations in
226 the register allocation and reloading or due to splitting. Any
227 such dependency is useless and can be ignored. */
228 if (NOTE_P (elem))
229 return 0;
231 present_p = 1;
232 #ifdef INSN_SCHEDULING
233 /* ??? No good way to tell from here whether we're doing interblock
234 scheduling. Possibly add another callback. */
235 #if 0
236 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
237 No need for interblock dependences with calls, since
238 calls are not moved between blocks. Note: the edge where
239 elem is a CALL is still required. */
240 if (CALL_P (insn)
241 && (INSN_BB (elem) != INSN_BB (insn)))
242 return 0;
243 #endif
245 /* If we already have a dependency for ELEM, then we do not need to
246 do anything. Avoiding the list walk below can cut compile times
247 dramatically for some code. */
248 if (true_dependency_cache != NULL)
250 enum reg_note present_dep_type = 0;
252 gcc_assert (anti_dependency_cache);
253 gcc_assert (output_dependency_cache);
254 if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
255 INSN_LUID (elem)))
256 /* Do nothing (present_dep_type is already 0). */
258 else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
259 INSN_LUID (elem)))
260 present_dep_type = REG_DEP_ANTI;
261 else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
262 INSN_LUID (elem)))
263 present_dep_type = REG_DEP_OUTPUT;
264 else
265 present_p = 0;
266 if (present_p && (int) dep_type >= (int) present_dep_type)
267 return 0;
269 #endif
271 /* Check that we don't already have this dependence. */
272 if (present_p)
273 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
274 if (XEXP (link, 0) == elem)
276 #ifdef INSN_SCHEDULING
277 /* Clear corresponding cache entry because type of the link
278 may be changed. */
279 if (true_dependency_cache != NULL)
281 enum reg_note kind = REG_NOTE_KIND (link);
282 switch (kind)
284 case REG_DEP_ANTI:
285 bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
286 INSN_LUID (elem));
287 break;
288 case REG_DEP_OUTPUT:
289 gcc_assert (output_dependency_cache);
290 bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
291 INSN_LUID (elem));
292 break;
293 default:
294 gcc_unreachable ();
297 #endif
299 /* If this is a more restrictive type of dependence than the existing
300 one, then change the existing dependence to this type. */
301 if ((int) dep_type < (int) REG_NOTE_KIND (link))
302 PUT_REG_NOTE_KIND (link, dep_type);
304 #ifdef INSN_SCHEDULING
305 /* If we are adding a dependency to INSN's LOG_LINKs, then
306 note that in the bitmap caches of dependency information. */
307 if (true_dependency_cache != NULL)
309 if ((int) REG_NOTE_KIND (link) == 0)
310 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
311 INSN_LUID (elem));
312 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
313 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
314 INSN_LUID (elem));
315 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
316 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
317 INSN_LUID (elem));
319 #endif
320 return 0;
322 /* Might want to check one level of transitivity to save conses. */
324 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
325 LOG_LINKS (insn) = link;
327 /* Insn dependency, not data dependency. */
328 PUT_REG_NOTE_KIND (link, dep_type);
330 #ifdef INSN_SCHEDULING
331 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
332 in the bitmap caches of dependency information. */
333 if (true_dependency_cache != NULL)
335 if ((int) dep_type == 0)
336 bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
337 else if (dep_type == REG_DEP_ANTI)
338 bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
339 else if (dep_type == REG_DEP_OUTPUT)
340 bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
342 #endif
343 return 1;
346 /* A convenience wrapper to operate on an entire list. */
348 static void
349 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
351 for (; list; list = XEXP (list, 1))
353 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
354 add_dependence (insn, XEXP (list, 0), dep_type);
358 /* Similar, but free *LISTP at the same time. */
360 static void
361 add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
362 enum reg_note dep_type)
364 rtx list, next;
365 for (list = *listp, *listp = NULL; list ; list = next)
367 next = XEXP (list, 1);
368 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
369 add_dependence (insn, XEXP (list, 0), dep_type);
370 free_INSN_LIST_node (list);
374 /* Clear all dependencies for an insn. */
376 static void
377 delete_all_dependences (rtx insn)
379 /* Clear caches, if they exist, as well as free the dependence. */
381 #ifdef INSN_SCHEDULING
382 if (true_dependency_cache != NULL)
384 bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
385 bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
386 bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
388 #endif
390 free_INSN_LIST_list (&LOG_LINKS (insn));
393 /* All insns in a scheduling group except the first should only have
394 dependencies on the previous insn in the group. So we find the
395 first instruction in the scheduling group by walking the dependence
396 chains backwards. Then we add the dependencies for the group to
397 the previous nonnote insn. */
399 static void
400 fixup_sched_groups (rtx insn)
402 rtx link, prev_nonnote;
404 for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
406 rtx i = insn;
409 i = prev_nonnote_insn (i);
411 if (XEXP (link, 0) == i)
412 goto next_link;
413 } while (SCHED_GROUP_P (i));
414 if (! sched_insns_conditions_mutex_p (i, XEXP (link, 0)))
415 add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
416 next_link:;
419 delete_all_dependences (insn);
421 prev_nonnote = prev_nonnote_insn (insn);
422 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
423 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
424 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
427 /* Process an insn's memory dependencies. There are four kinds of
428 dependencies:
430 (0) read dependence: read follows read
431 (1) true dependence: read follows write
432 (2) anti dependence: write follows read
433 (3) output dependence: write follows write
435 We are careful to build only dependencies which actually exist, and
436 use transitivity to avoid building too many links. */
438 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
439 The MEM is a memory reference contained within INSN, which we are saving
440 so that we can do memory aliasing on it. */
442 static void
443 add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
444 rtx insn, rtx mem)
446 rtx link;
448 link = alloc_INSN_LIST (insn, *insn_list);
449 *insn_list = link;
451 if (current_sched_info->use_cselib)
453 mem = shallow_copy_rtx (mem);
454 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
456 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
457 *mem_list = link;
459 deps->pending_lists_length++;
462 /* Make a dependency between every memory reference on the pending lists
463 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
464 dependencies for a read operation, similarly with FOR_WRITE. */
466 static void
467 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
468 int for_write)
470 if (for_write)
472 add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
473 REG_DEP_ANTI);
474 free_EXPR_LIST_list (&deps->pending_read_mems);
477 add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
478 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
479 free_EXPR_LIST_list (&deps->pending_write_mems);
480 deps->pending_lists_length = 0;
482 add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
483 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
484 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
485 deps->pending_flush_length = 1;
488 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
489 The type of the reference is specified by REF and can be SET,
490 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
492 static void
493 sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
494 enum rtx_code ref, rtx insn)
496 /* A hard reg in a wide mode may really be multiple registers.
497 If so, mark all of them just like the first. */
498 if (regno < FIRST_PSEUDO_REGISTER)
500 int i = hard_regno_nregs[regno][mode];
501 if (ref == SET)
503 while (--i >= 0)
504 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
506 else if (ref == USE)
508 while (--i >= 0)
509 SET_REGNO_REG_SET (reg_pending_uses, regno + i);
511 else
513 while (--i >= 0)
514 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
518 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
519 it does not reload. Ignore these as they have served their
520 purpose already. */
521 else if (regno >= deps->max_reg)
523 enum rtx_code code = GET_CODE (PATTERN (insn));
524 gcc_assert (code == USE || code == CLOBBER);
527 else
529 if (ref == SET)
530 SET_REGNO_REG_SET (reg_pending_sets, regno);
531 else if (ref == USE)
532 SET_REGNO_REG_SET (reg_pending_uses, regno);
533 else
534 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
536 /* Pseudos that are REG_EQUIV to something may be replaced
537 by that during reloading. We need only add dependencies for
538 the address in the REG_EQUIV note. */
539 if (!reload_completed && get_reg_known_equiv_p (regno))
541 rtx t = get_reg_known_value (regno);
542 if (MEM_P (t))
543 sched_analyze_2 (deps, XEXP (t, 0), insn);
546 /* Don't let it cross a call after scheduling if it doesn't
547 already cross one. */
548 if (REG_N_CALLS_CROSSED (regno) == 0)
550 if (ref == USE)
551 deps->sched_before_next_call
552 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
553 else
554 add_dependence_list (insn, deps->last_function_call, 1,
555 REG_DEP_ANTI);
560 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
561 rtx, X, creating all dependencies generated by the write to the
562 destination of X, and reads of everything mentioned. */
564 static void
565 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
567 rtx dest = XEXP (x, 0);
568 enum rtx_code code = GET_CODE (x);
570 if (dest == 0)
571 return;
573 if (GET_CODE (dest) == PARALLEL)
575 int i;
577 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
578 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
579 sched_analyze_1 (deps,
580 gen_rtx_CLOBBER (VOIDmode,
581 XEXP (XVECEXP (dest, 0, i), 0)),
582 insn);
584 if (GET_CODE (x) == SET)
585 sched_analyze_2 (deps, SET_SRC (x), insn);
586 return;
589 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
590 || GET_CODE (dest) == ZERO_EXTRACT)
592 if (GET_CODE (dest) == STRICT_LOW_PART
593 || GET_CODE (dest) == ZERO_EXTRACT
594 || read_modify_subreg_p (dest))
596 /* These both read and modify the result. We must handle
597 them as writes to get proper dependencies for following
598 instructions. We must handle them as reads to get proper
599 dependencies from this to previous instructions.
600 Thus we need to call sched_analyze_2. */
602 sched_analyze_2 (deps, XEXP (dest, 0), insn);
604 if (GET_CODE (dest) == ZERO_EXTRACT)
606 /* The second and third arguments are values read by this insn. */
607 sched_analyze_2 (deps, XEXP (dest, 1), insn);
608 sched_analyze_2 (deps, XEXP (dest, 2), insn);
610 dest = XEXP (dest, 0);
613 if (REG_P (dest))
615 int regno = REGNO (dest);
616 enum machine_mode mode = GET_MODE (dest);
618 sched_analyze_reg (deps, regno, mode, code, insn);
620 #ifdef STACK_REGS
621 /* Treat all writes to a stack register as modifying the TOS. */
622 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
624 /* Avoid analyzing the same register twice. */
625 if (regno != FIRST_STACK_REG)
626 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
627 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
629 #endif
631 else if (MEM_P (dest))
633 /* Writing memory. */
634 rtx t = dest;
636 if (current_sched_info->use_cselib)
638 t = shallow_copy_rtx (dest);
639 cselib_lookup (XEXP (t, 0), Pmode, 1);
640 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
642 t = canon_rtx (t);
644 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
646 /* Flush all pending reads and writes to prevent the pending lists
647 from getting any larger. Insn scheduling runs too slowly when
648 these lists get long. When compiling GCC with itself,
649 this flush occurs 8 times for sparc, and 10 times for m88k using
650 the default value of 32. */
651 flush_pending_lists (deps, insn, false, true);
653 else
655 rtx pending, pending_mem;
657 pending = deps->pending_read_insns;
658 pending_mem = deps->pending_read_mems;
659 while (pending)
661 if (anti_dependence (XEXP (pending_mem, 0), t)
662 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
663 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
665 pending = XEXP (pending, 1);
666 pending_mem = XEXP (pending_mem, 1);
669 pending = deps->pending_write_insns;
670 pending_mem = deps->pending_write_mems;
671 while (pending)
673 if (output_dependence (XEXP (pending_mem, 0), t)
674 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
675 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
677 pending = XEXP (pending, 1);
678 pending_mem = XEXP (pending_mem, 1);
681 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
682 REG_DEP_ANTI);
684 add_insn_mem_dependence (deps, &deps->pending_write_insns,
685 &deps->pending_write_mems, insn, dest);
687 sched_analyze_2 (deps, XEXP (dest, 0), insn);
690 /* Analyze reads. */
691 if (GET_CODE (x) == SET)
692 sched_analyze_2 (deps, SET_SRC (x), insn);
695 /* Analyze the uses of memory and registers in rtx X in INSN. */
697 static void
698 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
700 int i;
701 int j;
702 enum rtx_code code;
703 const char *fmt;
705 if (x == 0)
706 return;
708 code = GET_CODE (x);
710 switch (code)
712 case CONST_INT:
713 case CONST_DOUBLE:
714 case CONST_VECTOR:
715 case SYMBOL_REF:
716 case CONST:
717 case LABEL_REF:
718 /* Ignore constants. Note that we must handle CONST_DOUBLE here
719 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
720 this does not mean that this insn is using cc0. */
721 return;
723 #ifdef HAVE_cc0
724 case CC0:
725 /* User of CC0 depends on immediately preceding insn. */
726 SCHED_GROUP_P (insn) = 1;
727 /* Don't move CC0 setter to another block (it can set up the
728 same flag for previous CC0 users which is safe). */
729 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
730 return;
731 #endif
733 case REG:
735 int regno = REGNO (x);
736 enum machine_mode mode = GET_MODE (x);
738 sched_analyze_reg (deps, regno, mode, USE, insn);
740 #ifdef STACK_REGS
741 /* Treat all reads of a stack register as modifying the TOS. */
742 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
744 /* Avoid analyzing the same register twice. */
745 if (regno != FIRST_STACK_REG)
746 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
747 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
749 #endif
750 return;
753 case MEM:
755 /* Reading memory. */
756 rtx u;
757 rtx pending, pending_mem;
758 rtx t = x;
760 if (current_sched_info->use_cselib)
762 t = shallow_copy_rtx (t);
763 cselib_lookup (XEXP (t, 0), Pmode, 1);
764 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
766 t = canon_rtx (t);
767 pending = deps->pending_read_insns;
768 pending_mem = deps->pending_read_mems;
769 while (pending)
771 if (read_dependence (XEXP (pending_mem, 0), t)
772 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
773 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
775 pending = XEXP (pending, 1);
776 pending_mem = XEXP (pending_mem, 1);
779 pending = deps->pending_write_insns;
780 pending_mem = deps->pending_write_mems;
781 while (pending)
783 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
784 t, rtx_varies_p)
785 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
786 add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
788 pending = XEXP (pending, 1);
789 pending_mem = XEXP (pending_mem, 1);
792 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
793 if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
794 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
796 /* Always add these dependencies to pending_reads, since
797 this insn may be followed by a write. */
798 add_insn_mem_dependence (deps, &deps->pending_read_insns,
799 &deps->pending_read_mems, insn, x);
801 /* Take advantage of tail recursion here. */
802 sched_analyze_2 (deps, XEXP (x, 0), insn);
803 return;
806 /* Force pending stores to memory in case a trap handler needs them. */
807 case TRAP_IF:
808 flush_pending_lists (deps, insn, true, false);
809 break;
811 case ASM_OPERANDS:
812 case ASM_INPUT:
813 case UNSPEC_VOLATILE:
815 /* Traditional and volatile asm instructions must be considered to use
816 and clobber all hard registers, all pseudo-registers and all of
817 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
819 Consider for instance a volatile asm that changes the fpu rounding
820 mode. An insn should not be moved across this even if it only uses
821 pseudo-regs because it might give an incorrectly rounded result. */
822 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
823 reg_pending_barrier = TRUE_BARRIER;
825 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
826 We can not just fall through here since then we would be confused
827 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
828 traditional asms unlike their normal usage. */
830 if (code == ASM_OPERANDS)
832 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
833 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
834 return;
836 break;
839 case PRE_DEC:
840 case POST_DEC:
841 case PRE_INC:
842 case POST_INC:
843 /* These both read and modify the result. We must handle them as writes
844 to get proper dependencies for following instructions. We must handle
845 them as reads to get proper dependencies from this to previous
846 instructions. Thus we need to pass them to both sched_analyze_1
847 and sched_analyze_2. We must call sched_analyze_2 first in order
848 to get the proper antecedent for the read. */
849 sched_analyze_2 (deps, XEXP (x, 0), insn);
850 sched_analyze_1 (deps, x, insn);
851 return;
853 case POST_MODIFY:
854 case PRE_MODIFY:
855 /* op0 = op0 + op1 */
856 sched_analyze_2 (deps, XEXP (x, 0), insn);
857 sched_analyze_2 (deps, XEXP (x, 1), insn);
858 sched_analyze_1 (deps, x, insn);
859 return;
861 default:
862 break;
865 /* Other cases: walk the insn. */
866 fmt = GET_RTX_FORMAT (code);
867 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
869 if (fmt[i] == 'e')
870 sched_analyze_2 (deps, XEXP (x, i), insn);
871 else if (fmt[i] == 'E')
872 for (j = 0; j < XVECLEN (x, i); j++)
873 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
877 /* Analyze an INSN with pattern X to find all dependencies. */
879 static void
880 sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
882 RTX_CODE code = GET_CODE (x);
883 rtx link;
884 unsigned i;
885 reg_set_iterator rsi;
887 if (code == COND_EXEC)
889 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
891 /* ??? Should be recording conditions so we reduce the number of
892 false dependencies. */
893 x = COND_EXEC_CODE (x);
894 code = GET_CODE (x);
896 if (code == SET || code == CLOBBER)
898 sched_analyze_1 (deps, x, insn);
900 /* Bare clobber insns are used for letting life analysis, reg-stack
901 and others know that a value is dead. Depend on the last call
902 instruction so that reg-stack won't get confused. */
903 if (code == CLOBBER)
904 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
906 else if (code == PARALLEL)
908 for (i = XVECLEN (x, 0); i--;)
910 rtx sub = XVECEXP (x, 0, i);
911 code = GET_CODE (sub);
913 if (code == COND_EXEC)
915 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
916 sub = COND_EXEC_CODE (sub);
917 code = GET_CODE (sub);
919 if (code == SET || code == CLOBBER)
920 sched_analyze_1 (deps, sub, insn);
921 else
922 sched_analyze_2 (deps, sub, insn);
925 else
926 sched_analyze_2 (deps, x, insn);
928 /* Mark registers CLOBBERED or used by called function. */
929 if (CALL_P (insn))
931 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
933 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
934 sched_analyze_1 (deps, XEXP (link, 0), insn);
935 else
936 sched_analyze_2 (deps, XEXP (link, 0), insn);
938 if (find_reg_note (insn, REG_SETJMP, NULL))
939 reg_pending_barrier = MOVE_BARRIER;
942 if (JUMP_P (insn))
944 rtx next;
945 next = next_nonnote_insn (insn);
946 if (next && BARRIER_P (next))
947 reg_pending_barrier = TRUE_BARRIER;
948 else
950 rtx pending, pending_mem;
951 regset_head tmp_uses, tmp_sets;
952 INIT_REG_SET (&tmp_uses);
953 INIT_REG_SET (&tmp_sets);
955 (*current_sched_info->compute_jump_reg_dependencies)
956 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
957 /* Make latency of jump equal to 0 by using anti-dependence. */
958 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
960 struct deps_reg *reg_last = &deps->reg_last[i];
961 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
962 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
963 reg_last->uses_length++;
964 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
966 IOR_REG_SET (reg_pending_sets, &tmp_sets);
968 CLEAR_REG_SET (&tmp_uses);
969 CLEAR_REG_SET (&tmp_sets);
971 /* All memory writes and volatile reads must happen before the
972 jump. Non-volatile reads must happen before the jump iff
973 the result is needed by the above register used mask. */
975 pending = deps->pending_write_insns;
976 pending_mem = deps->pending_write_mems;
977 while (pending)
979 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
980 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
981 pending = XEXP (pending, 1);
982 pending_mem = XEXP (pending_mem, 1);
985 pending = deps->pending_read_insns;
986 pending_mem = deps->pending_read_mems;
987 while (pending)
989 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
990 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
991 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
992 pending = XEXP (pending, 1);
993 pending_mem = XEXP (pending_mem, 1);
996 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
997 REG_DEP_ANTI);
1001 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1002 block, then we must be sure that no instructions are scheduled across it.
1003 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1004 become incorrect. */
1005 if (loop_notes)
1007 rtx link;
1009 /* Update loop_notes with any notes from this insn. */
1010 link = loop_notes;
1011 while (XEXP (link, 1))
1013 gcc_assert (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1014 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END);
1016 reg_pending_barrier = MOVE_BARRIER;
1017 link = XEXP (link, 1);
1019 XEXP (link, 1) = REG_NOTES (insn);
1020 REG_NOTES (insn) = loop_notes;
1023 /* If this instruction can throw an exception, then moving it changes
1024 where block boundaries fall. This is mighty confusing elsewhere.
1025 Therefore, prevent such an instruction from being moved. */
1026 if (can_throw_internal (insn))
1027 reg_pending_barrier = MOVE_BARRIER;
1029 /* Add dependencies if a scheduling barrier was found. */
1030 if (reg_pending_barrier)
1032 /* In the case of a barrier, most of the added dependencies are not
1033 real, so we use anti-dependences here. */
1034 if (sched_get_condition (insn))
1036 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1038 struct deps_reg *reg_last = &deps->reg_last[i];
1039 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1040 add_dependence_list
1041 (insn, reg_last->sets, 0,
1042 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1043 add_dependence_list
1044 (insn, reg_last->clobbers, 0,
1045 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1048 else
1050 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1052 struct deps_reg *reg_last = &deps->reg_last[i];
1053 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1054 REG_DEP_ANTI);
1055 add_dependence_list_and_free
1056 (insn, &reg_last->sets, 0,
1057 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1058 add_dependence_list_and_free
1059 (insn, &reg_last->clobbers, 0,
1060 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
1061 reg_last->uses_length = 0;
1062 reg_last->clobbers_length = 0;
1066 for (i = 0; i < (unsigned)deps->max_reg; i++)
1068 struct deps_reg *reg_last = &deps->reg_last[i];
1069 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1070 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1073 flush_pending_lists (deps, insn, true, true);
1074 CLEAR_REG_SET (&deps->reg_conditional_sets);
1075 reg_pending_barrier = NOT_A_BARRIER;
1077 else
1079 /* If the current insn is conditional, we can't free any
1080 of the lists. */
1081 if (sched_get_condition (insn))
1083 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1085 struct deps_reg *reg_last = &deps->reg_last[i];
1086 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
1087 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
1088 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1089 reg_last->uses_length++;
1091 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1093 struct deps_reg *reg_last = &deps->reg_last[i];
1094 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1095 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1096 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1097 reg_last->clobbers_length++;
1099 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1101 struct deps_reg *reg_last = &deps->reg_last[i];
1102 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1103 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
1104 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1105 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1106 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1109 else
1111 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1113 struct deps_reg *reg_last = &deps->reg_last[i];
1114 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
1115 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
1116 reg_last->uses_length++;
1117 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1119 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
1121 struct deps_reg *reg_last = &deps->reg_last[i];
1122 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1123 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1125 add_dependence_list_and_free (insn, &reg_last->sets, 0,
1126 REG_DEP_OUTPUT);
1127 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1128 REG_DEP_ANTI);
1129 add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
1130 REG_DEP_OUTPUT);
1131 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1132 reg_last->clobbers_length = 0;
1133 reg_last->uses_length = 0;
1135 else
1137 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
1138 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
1140 reg_last->clobbers_length++;
1141 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1143 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
1145 struct deps_reg *reg_last = &deps->reg_last[i];
1146 add_dependence_list_and_free (insn, &reg_last->sets, 0,
1147 REG_DEP_OUTPUT);
1148 add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
1149 REG_DEP_OUTPUT);
1150 add_dependence_list_and_free (insn, &reg_last->uses, 0,
1151 REG_DEP_ANTI);
1152 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1153 reg_last->uses_length = 0;
1154 reg_last->clobbers_length = 0;
1155 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1159 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1160 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1161 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1163 CLEAR_REG_SET (reg_pending_uses);
1164 CLEAR_REG_SET (reg_pending_clobbers);
1165 CLEAR_REG_SET (reg_pending_sets);
1167 /* If we are currently in a libcall scheduling group, then mark the
1168 current insn as being in a scheduling group and that it can not
1169 be moved into a different basic block. */
1171 if (deps->libcall_block_tail_insn)
1173 SCHED_GROUP_P (insn) = 1;
1174 CANT_MOVE (insn) = 1;
1177 /* If a post-call group is still open, see if it should remain so.
1178 This insn must be a simple move of a hard reg to a pseudo or
1179 vice-versa.
1181 We must avoid moving these insns for correctness on
1182 SMALL_REGISTER_CLASS machines, and for special registers like
1183 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1184 hard regs for all targets. */
1186 if (deps->in_post_call_group_p)
1188 rtx tmp, set = single_set (insn);
1189 int src_regno, dest_regno;
1191 if (set == NULL)
1192 goto end_call_group;
1194 tmp = SET_DEST (set);
1195 if (GET_CODE (tmp) == SUBREG)
1196 tmp = SUBREG_REG (tmp);
1197 if (REG_P (tmp))
1198 dest_regno = REGNO (tmp);
1199 else
1200 goto end_call_group;
1202 tmp = SET_SRC (set);
1203 if (GET_CODE (tmp) == SUBREG)
1204 tmp = SUBREG_REG (tmp);
1205 if ((GET_CODE (tmp) == PLUS
1206 || GET_CODE (tmp) == MINUS)
1207 && REG_P (XEXP (tmp, 0))
1208 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
1209 && dest_regno == STACK_POINTER_REGNUM)
1210 src_regno = STACK_POINTER_REGNUM;
1211 else if (REG_P (tmp))
1212 src_regno = REGNO (tmp);
1213 else
1214 goto end_call_group;
1216 if (src_regno < FIRST_PSEUDO_REGISTER
1217 || dest_regno < FIRST_PSEUDO_REGISTER)
1219 if (deps->in_post_call_group_p == post_call_initial)
1220 deps->in_post_call_group_p = post_call;
1222 SCHED_GROUP_P (insn) = 1;
1223 CANT_MOVE (insn) = 1;
1225 else
1227 end_call_group:
1228 deps->in_post_call_group_p = not_post_call;
1232 /* Fixup the dependencies in the sched group. */
1233 if (SCHED_GROUP_P (insn))
1234 fixup_sched_groups (insn);
1237 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1238 for every dependency. */
1240 void
1241 sched_analyze (struct deps *deps, rtx head, rtx tail)
1243 rtx insn;
1244 rtx loop_notes = 0;
1246 if (current_sched_info->use_cselib)
1247 cselib_init (true);
1249 /* Before reload, if the previous block ended in a call, show that
1250 we are inside a post-call group, so as to keep the lifetimes of
1251 hard registers correct. */
1252 if (! reload_completed && !LABEL_P (head))
1254 insn = prev_nonnote_insn (head);
1255 if (insn && CALL_P (insn))
1256 deps->in_post_call_group_p = post_call_initial;
1258 for (insn = head;; insn = NEXT_INSN (insn))
1260 rtx link, end_seq, r0, set;
1262 if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
1264 /* Clear out the stale LOG_LINKS from flow. */
1265 free_INSN_LIST_list (&LOG_LINKS (insn));
1267 /* Make each JUMP_INSN a scheduling barrier for memory
1268 references. */
1269 if (JUMP_P (insn))
1271 /* Keep the list a reasonable size. */
1272 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1273 flush_pending_lists (deps, insn, true, true);
1274 else
1275 deps->last_pending_memory_flush
1276 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1278 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1279 loop_notes = 0;
1281 else if (CALL_P (insn))
1283 int i;
1285 CANT_MOVE (insn) = 1;
1287 /* Clear out the stale LOG_LINKS from flow. */
1288 free_INSN_LIST_list (&LOG_LINKS (insn));
1290 if (find_reg_note (insn, REG_SETJMP, NULL))
1292 /* This is setjmp. Assume that all registers, not just
1293 hard registers, may be clobbered by this call. */
1294 reg_pending_barrier = MOVE_BARRIER;
1296 else
1298 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1299 /* A call may read and modify global register variables. */
1300 if (global_regs[i])
1302 SET_REGNO_REG_SET (reg_pending_sets, i);
1303 SET_REGNO_REG_SET (reg_pending_uses, i);
1305 /* Other call-clobbered hard regs may be clobbered.
1306 Since we only have a choice between 'might be clobbered'
1307 and 'definitely not clobbered', we must include all
1308 partly call-clobbered registers here. */
1309 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1310 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1311 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1312 /* We don't know what set of fixed registers might be used
1313 by the function, but it is certain that the stack pointer
1314 is among them, so be conservative. */
1315 else if (fixed_regs[i])
1316 SET_REGNO_REG_SET (reg_pending_uses, i);
1317 /* The frame pointer is normally not used by the function
1318 itself, but by the debugger. */
1319 /* ??? MIPS o32 is an exception. It uses the frame pointer
1320 in the macro expansion of jal but does not represent this
1321 fact in the call_insn rtl. */
1322 else if (i == FRAME_POINTER_REGNUM
1323 || (i == HARD_FRAME_POINTER_REGNUM
1324 && (! reload_completed || frame_pointer_needed)))
1325 SET_REGNO_REG_SET (reg_pending_uses, i);
1328 /* For each insn which shouldn't cross a call, add a dependence
1329 between that insn and this call insn. */
1330 add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
1331 REG_DEP_ANTI);
1333 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1334 loop_notes = 0;
1336 /* In the absence of interprocedural alias analysis, we must flush
1337 all pending reads and writes, and start new dependencies starting
1338 from here. But only flush writes for constant calls (which may
1339 be passed a pointer to something we haven't written yet). */
1340 flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1342 /* Remember the last function call for limiting lifetimes. */
1343 free_INSN_LIST_list (&deps->last_function_call);
1344 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1346 /* Before reload, begin a post-call group, so as to keep the
1347 lifetimes of hard registers correct. */
1348 if (! reload_completed)
1349 deps->in_post_call_group_p = post_call;
1352 /* EH_REGION insn notes can not appear until well after we complete
1353 scheduling. */
1354 if (NOTE_P (insn))
1355 gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1356 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);
1358 /* See comments on reemit_notes as to why we do this.
1359 ??? Actually, the reemit_notes just say what is done, not why. */
1361 if (NOTE_P (insn)
1362 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1363 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END))
1365 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1366 GEN_INT (NOTE_LINE_NUMBER (insn)),
1367 loop_notes);
1368 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1371 if (current_sched_info->use_cselib)
1372 cselib_process_insn (insn);
1374 /* Now that we have completed handling INSN, check and see if it is
1375 a CLOBBER beginning a libcall block. If it is, record the
1376 end of the libcall sequence.
1378 We want to schedule libcall blocks as a unit before reload. While
1379 this restricts scheduling, it preserves the meaning of a libcall
1380 block.
1382 As a side effect, we may get better code due to decreased register
1383 pressure as well as less chance of a foreign insn appearing in
1384 a libcall block. */
1385 if (!reload_completed
1386 /* Note we may have nested libcall sequences. We only care about
1387 the outermost libcall sequence. */
1388 && deps->libcall_block_tail_insn == 0
1389 /* The sequence must start with a clobber of a register. */
1390 && NONJUMP_INSN_P (insn)
1391 && GET_CODE (PATTERN (insn)) == CLOBBER
1392 && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
1393 && REG_P (XEXP (PATTERN (insn), 0))
1394 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1395 && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1396 && (end_seq = XEXP (link, 0)) != 0
1397 /* The insn referenced by the REG_LIBCALL note must be a
1398 simple nop copy with the same destination as the register
1399 mentioned in the clobber. */
1400 && (set = single_set (end_seq)) != 0
1401 && SET_DEST (set) == r0 && SET_SRC (set) == r0
1402 /* And finally the insn referenced by the REG_LIBCALL must
1403 also contain a REG_EQUAL note and a REG_RETVAL note. */
1404 && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1405 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1406 deps->libcall_block_tail_insn = XEXP (link, 0);
1408 /* If we have reached the end of a libcall block, then close the
1409 block. */
1410 if (deps->libcall_block_tail_insn == insn)
1411 deps->libcall_block_tail_insn = 0;
1413 if (insn == tail)
1415 if (current_sched_info->use_cselib)
1416 cselib_finish ();
1417 return;
1420 gcc_unreachable ();
1424 /* The following function adds forward dependence (FROM, TO) with
1425 given DEP_TYPE. The forward dependence must not already exist. */
1427 void
1428 add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
1430 rtx new_link;
1432 #ifdef ENABLE_CHECKING
1433 /* If add_dependence is working properly there should never
1434 be notes, deleted insns or duplicates in the backward
1435 links. Thus we need not check for them here.
1437 However, if we have enabled checking we might as well go
1438 ahead and verify that add_dependence worked properly. */
1439 gcc_assert (!NOTE_P (from));
1440 gcc_assert (!INSN_DELETED_P (from));
1441 if (forward_dependency_cache)
1442 gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
1443 INSN_LUID (to)));
1444 else
1445 gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));
1447 /* ??? If bitmap_bit_p is a predicate, what is this supposed to do? */
1448 if (forward_dependency_cache != NULL)
1449 bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
1450 INSN_LUID (to));
1451 #endif
1453 new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
1455 PUT_REG_NOTE_KIND (new_link, dep_type);
1457 INSN_DEPEND (from) = new_link;
1458 INSN_DEP_COUNT (to) += 1;
1461 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1462 dependences from LOG_LINKS to build forward dependences in
1463 INSN_DEPEND. */
1465 void
1466 compute_forward_dependences (rtx head, rtx tail)
1468 rtx insn, link;
1469 rtx next_tail;
1471 next_tail = NEXT_INSN (tail);
1472 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1474 if (! INSN_P (insn))
1475 continue;
1477 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1478 add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
1482 /* Initialize variables for region data dependence analysis.
1483 n_bbs is the number of region blocks. */
1485 void
1486 init_deps (struct deps *deps)
1488 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1490 deps->max_reg = max_reg;
1491 deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
1492 INIT_REG_SET (&deps->reg_last_in_use);
1493 INIT_REG_SET (&deps->reg_conditional_sets);
1495 deps->pending_read_insns = 0;
1496 deps->pending_read_mems = 0;
1497 deps->pending_write_insns = 0;
1498 deps->pending_write_mems = 0;
1499 deps->pending_lists_length = 0;
1500 deps->pending_flush_length = 0;
1501 deps->last_pending_memory_flush = 0;
1502 deps->last_function_call = 0;
1503 deps->sched_before_next_call = 0;
1504 deps->in_post_call_group_p = not_post_call;
1505 deps->libcall_block_tail_insn = 0;
1508 /* Free insn lists found in DEPS. */
1510 void
1511 free_deps (struct deps *deps)
1513 unsigned i;
1514 reg_set_iterator rsi;
1516 free_INSN_LIST_list (&deps->pending_read_insns);
1517 free_EXPR_LIST_list (&deps->pending_read_mems);
1518 free_INSN_LIST_list (&deps->pending_write_insns);
1519 free_EXPR_LIST_list (&deps->pending_write_mems);
1520 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1522 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1523 times. For a testcase with 42000 regs and 8000 small basic blocks,
1524 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1525 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
1527 struct deps_reg *reg_last = &deps->reg_last[i];
1528 if (reg_last->uses)
1529 free_INSN_LIST_list (&reg_last->uses);
1530 if (reg_last->sets)
1531 free_INSN_LIST_list (&reg_last->sets);
1532 if (reg_last->clobbers)
1533 free_INSN_LIST_list (&reg_last->clobbers);
1535 CLEAR_REG_SET (&deps->reg_last_in_use);
1536 CLEAR_REG_SET (&deps->reg_conditional_sets);
1538 free (deps->reg_last);
1541 /* If it is profitable to use them, initialize caches for tracking
1542 dependency information. LUID is the number of insns to be scheduled;
1543 it is used in the estimate of profitability. */
1545 void
1546 init_dependency_caches (int luid)
1548 /* ?!? We could save some memory by computing a per-region luid mapping
1549 which could reduce both the number of vectors in the cache and the size
1550 of each vector. Instead we just avoid the cache entirely unless the
1551 average number of instructions in a basic block is very high. See
1552 the comment before the declaration of true_dependency_cache for
1553 what we consider "very high". */
1554 if (luid / n_basic_blocks > 100 * 5)
1556 int i;
1557 true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1558 anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1559 output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1560 #ifdef ENABLE_CHECKING
1561 forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
1562 #endif
1563 for (i = 0; i < luid; i++)
1565 bitmap_initialize (&true_dependency_cache[i], 0);
1566 bitmap_initialize (&anti_dependency_cache[i], 0);
1567 bitmap_initialize (&output_dependency_cache[i], 0);
1568 #ifdef ENABLE_CHECKING
1569 bitmap_initialize (&forward_dependency_cache[i], 0);
1570 #endif
1572 cache_size = luid;
1576 /* Free the caches allocated in init_dependency_caches. */
1578 void
1579 free_dependency_caches (void)
1581 if (true_dependency_cache)
1583 int i;
1585 for (i = 0; i < cache_size; i++)
1587 bitmap_clear (&true_dependency_cache[i]);
1588 bitmap_clear (&anti_dependency_cache[i]);
1589 bitmap_clear (&output_dependency_cache[i]);
1590 #ifdef ENABLE_CHECKING
1591 bitmap_clear (&forward_dependency_cache[i]);
1592 #endif
1594 free (true_dependency_cache);
1595 true_dependency_cache = NULL;
1596 free (anti_dependency_cache);
1597 anti_dependency_cache = NULL;
1598 free (output_dependency_cache);
1599 output_dependency_cache = NULL;
1600 #ifdef ENABLE_CHECKING
1601 free (forward_dependency_cache);
1602 forward_dependency_cache = NULL;
1603 #endif
1607 /* Initialize some global variables needed by the dependency analysis
1608 code. */
1610 void
1611 init_deps_global (void)
1613 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
1614 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
1615 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
1616 reg_pending_barrier = NOT_A_BARRIER;
1619 /* Free everything used by the dependency analysis code. */
1621 void
1622 finish_deps_global (void)
1624 FREE_REG_SET (reg_pending_sets);
1625 FREE_REG_SET (reg_pending_clobbers);
1626 FREE_REG_SET (reg_pending_uses);