gcc-4_9-branch/gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992-2014 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "recog.h"
39 #include "emit-rtl.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
43 #include "ira.h"
44 #include "target.h"
46 #ifdef INSN_SCHEDULING
48 #ifdef ENABLE_CHECKING
49 #define CHECK (true)
50 #else
51 #define CHECK (false)
52 #endif
54 /* Holds current parameters for the dependency analyzer. */
55 struct sched_deps_info_def *sched_deps_info;
57 /* The data is specific to the Haifa scheduler. */
58 vec<haifa_deps_insn_data_def>
59 h_d_i_d = vNULL;
61 /* Return the major type present in the DS. */
62 enum reg_note
63 ds_to_dk (ds_t ds)
65 if (ds & DEP_TRUE)
66 return REG_DEP_TRUE;
68 if (ds & DEP_OUTPUT)
69 return REG_DEP_OUTPUT;
71 if (ds & DEP_CONTROL)
72 return REG_DEP_CONTROL;
74 gcc_assert (ds & DEP_ANTI);
76 return REG_DEP_ANTI;
79 /* Return equivalent dep_status. */
80 ds_t
81 dk_to_ds (enum reg_note dk)
83 switch (dk)
85 case REG_DEP_TRUE:
86 return DEP_TRUE;
88 case REG_DEP_OUTPUT:
89 return DEP_OUTPUT;
91 case REG_DEP_CONTROL:
92 return DEP_CONTROL;
94 default:
95 gcc_assert (dk == REG_DEP_ANTI);
96 return DEP_ANTI;
100 /* Functions to operate with dependence information container - dep_t. */
102 /* Init DEP with the arguments. */
103 void
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
106 DEP_PRO (dep) = pro;
107 DEP_CON (dep) = con;
108 DEP_TYPE (dep) = type;
109 DEP_STATUS (dep) = ds;
110 DEP_COST (dep) = UNKNOWN_DEP_COST;
111 DEP_NONREG (dep) = 0;
112 DEP_MULTIPLE (dep) = 0;
113 DEP_REPLACE (dep) = NULL;
116 /* Init DEP with the arguments.
117 While most of the scheduler (including targets) only need the major type
118 of the dependency, it is convenient to hide full dep_status from them. */
119 void
120 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
122 ds_t ds;
124 if ((current_sched_info->flags & USE_DEPS_LIST))
125 ds = dk_to_ds (kind);
126 else
127 ds = 0;
129 init_dep_1 (dep, pro, con, kind, ds);
132 /* Make a copy of FROM in TO. */
133 static void
134 copy_dep (dep_t to, dep_t from)
136 memcpy (to, from, sizeof (*to));
139 static void dump_ds (FILE *, ds_t);
141 /* Define flags for dump_dep (). */
143 /* Dump producer of the dependence. */
144 #define DUMP_DEP_PRO (2)
146 /* Dump consumer of the dependence. */
147 #define DUMP_DEP_CON (4)
149 /* Dump type of the dependence. */
150 #define DUMP_DEP_TYPE (8)
152 /* Dump status of the dependence. */
153 #define DUMP_DEP_STATUS (16)
155 /* Dump all information about the dependence. */
156 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
157 |DUMP_DEP_STATUS)
159 /* Dump DEP to DUMP.
160 FLAGS is a bit mask specifying what information about DEP needs
161 to be printed.
162 If FLAGS has the very first bit set, then dump all information about DEP
163 and propagate this bit into the callee dump functions. */
164 static void
165 dump_dep (FILE *dump, dep_t dep, int flags)
167 if (flags & 1)
168 flags |= DUMP_DEP_ALL;
170 fprintf (dump, "<");
172 if (flags & DUMP_DEP_PRO)
173 fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
175 if (flags & DUMP_DEP_CON)
176 fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
178 if (flags & DUMP_DEP_TYPE)
180 char t;
181 enum reg_note type = DEP_TYPE (dep);
183 switch (type)
185 case REG_DEP_TRUE:
186 t = 't';
187 break;
189 case REG_DEP_OUTPUT:
190 t = 'o';
191 break;
193 case REG_DEP_CONTROL:
194 t = 'c';
195 break;
197 case REG_DEP_ANTI:
198 t = 'a';
199 break;
201 default:
202 gcc_unreachable ();
203 break;
206 fprintf (dump, "%c; ", t);
209 if (flags & DUMP_DEP_STATUS)
211 if (current_sched_info->flags & USE_DEPS_LIST)
212 dump_ds (dump, DEP_STATUS (dep));
215 fprintf (dump, ">");
218 /* Default flags for dump_dep (). */
219 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
221 /* Dump all fields of DEP to STDERR. */
222 void
223 sd_debug_dep (dep_t dep)
225 dump_dep (stderr, dep, 1);
226 fprintf (stderr, "\n");
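/* Editorial note (not part of the original sources; insn UIDs 7 and 12 are
   hypothetical): sd_debug_dep passes flag 1, so dump_dep prints producer UID,
   consumer UID, the type letter and, when USE_DEPS_LIST is set, the status.
   A true dependence of insn 12 on insn 7 therefore prints as "<7; 12; t; >".  */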
229 /* Determine whether DEP is a dependency link of a non-debug insn on a
230 debug insn. */
232 static inline bool
233 depl_on_debug_p (dep_link_t dep)
235 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
236 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
239 /* Functions to operate with a single link from the dependencies lists -
240 dep_link_t. */
242 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
243 PREV_NEXT_P. */
244 static void
245 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
247 dep_link_t next = *prev_nextp;
249 gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
250 && DEP_LINK_NEXT (l) == NULL);
252 /* Init node being inserted. */
253 DEP_LINK_PREV_NEXTP (l) = prev_nextp;
254 DEP_LINK_NEXT (l) = next;
256 /* Fix next node. */
257 if (next != NULL)
259 gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
261 DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
264 /* Fix prev node. */
265 *prev_nextp = l;
268 /* Add dep_link LINK to deps_list L. */
269 static void
270 add_to_deps_list (dep_link_t link, deps_list_t l)
272 attach_dep_link (link, &DEPS_LIST_FIRST (l));
274 /* Don't count debug deps. */
275 if (!depl_on_debug_p (link))
276 ++DEPS_LIST_N_LINKS (l);
279 /* Detach dep_link L from the list. */
280 static void
281 detach_dep_link (dep_link_t l)
283 dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
284 dep_link_t next = DEP_LINK_NEXT (l);
286 *prev_nextp = next;
288 if (next != NULL)
289 DEP_LINK_PREV_NEXTP (next) = prev_nextp;
291 DEP_LINK_PREV_NEXTP (l) = NULL;
292 DEP_LINK_NEXT (l) = NULL;
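/* Editorial sketch of the linkage used above (not part of the original
   sources).  Every dep_link records the address of the "next" field that
   points at it, so detaching needs no back-pointer walk:

       DEPS_LIST_FIRST (l) -> A -> B -> C -> NULL
       DEP_LINK_PREV_NEXTP (A) == &DEPS_LIST_FIRST (l)
       DEP_LINK_PREV_NEXTP (B) == &DEP_LINK_NEXT (A)

   Detaching B only rewrites *DEP_LINK_PREV_NEXTP (B) and, when a successor
   exists, DEP_LINK_PREV_NEXTP (DEP_LINK_NEXT (B)).  */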
295 /* Remove link LINK from list LIST. */
296 static void
297 remove_from_deps_list (dep_link_t link, deps_list_t list)
299 detach_dep_link (link);
301 /* Don't count debug deps. */
302 if (!depl_on_debug_p (link))
303 --DEPS_LIST_N_LINKS (list);
306 /* Move link LINK from list FROM to list TO. */
307 static void
308 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
310 remove_from_deps_list (link, from);
311 add_to_deps_list (link, to);
 314 /* Return true if LINK is not attached to any list. */
315 static bool
316 dep_link_is_detached_p (dep_link_t link)
318 return DEP_LINK_PREV_NEXTP (link) == NULL;
321 /* Pool to hold all dependency nodes (dep_node_t). */
322 static alloc_pool dn_pool;
324 /* Number of dep_nodes out there. */
325 static int dn_pool_diff = 0;
327 /* Create a dep_node. */
328 static dep_node_t
329 create_dep_node (void)
331 dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
332 dep_link_t back = DEP_NODE_BACK (n);
333 dep_link_t forw = DEP_NODE_FORW (n);
335 DEP_LINK_NODE (back) = n;
336 DEP_LINK_NEXT (back) = NULL;
337 DEP_LINK_PREV_NEXTP (back) = NULL;
339 DEP_LINK_NODE (forw) = n;
340 DEP_LINK_NEXT (forw) = NULL;
341 DEP_LINK_PREV_NEXTP (forw) = NULL;
343 ++dn_pool_diff;
345 return n;
348 /* Delete dep_node N. N must not be connected to any deps_list. */
349 static void
350 delete_dep_node (dep_node_t n)
352 gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
353 && dep_link_is_detached_p (DEP_NODE_FORW (n)));
355 XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
357 --dn_pool_diff;
359 pool_free (dn_pool, n);
362 /* Pool to hold dependencies lists (deps_list_t). */
363 static alloc_pool dl_pool;
365 /* Number of deps_lists out there. */
366 static int dl_pool_diff = 0;
368 /* Functions to operate with dependences lists - deps_list_t. */
370 /* Return true if list L is empty. */
371 static bool
372 deps_list_empty_p (deps_list_t l)
374 return DEPS_LIST_N_LINKS (l) == 0;
377 /* Create a new deps_list. */
378 static deps_list_t
379 create_deps_list (void)
381 deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
383 DEPS_LIST_FIRST (l) = NULL;
384 DEPS_LIST_N_LINKS (l) = 0;
386 ++dl_pool_diff;
387 return l;
390 /* Free deps_list L. */
391 static void
392 free_deps_list (deps_list_t l)
394 gcc_assert (deps_list_empty_p (l));
396 --dl_pool_diff;
398 pool_free (dl_pool, l);
 401 /* Return true if there are no dep_nodes or deps_lists out there.
402 After the region is scheduled all the dependency nodes and lists
403 should [generally] be returned to pool. */
404 bool
405 deps_pools_are_empty_p (void)
407 return dn_pool_diff == 0 && dl_pool_diff == 0;
410 /* Remove all elements from L. */
411 static void
412 clear_deps_list (deps_list_t l)
416 dep_link_t link = DEPS_LIST_FIRST (l);
418 if (link == NULL)
419 break;
421 remove_from_deps_list (link, l);
423 while (1);
426 /* Decide whether a dependency should be treated as a hard or a speculative
427 dependency. */
428 static bool
429 dep_spec_p (dep_t dep)
431 if (current_sched_info->flags & DO_SPECULATION)
433 if (DEP_STATUS (dep) & SPECULATIVE)
434 return true;
436 if (current_sched_info->flags & DO_PREDICATION)
438 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
439 return true;
441 if (DEP_REPLACE (dep) != NULL)
442 return true;
443 return false;
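/* Editorial summary of dep_spec_p (restating the code above): a dependence
   is treated as speculative if (a) the pass runs with DO_SPECULATION and the
   dep carries SPECULATIVE status, (b) the pass runs with DO_PREDICATION and
   the dep is a REG_DEP_CONTROL, or (c) the dep has a DEP_REPLACE description.
   Everything else is a hard dependence and is kept on INSN_HARD_BACK_DEPS.  */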
446 static regset reg_pending_sets;
447 static regset reg_pending_clobbers;
448 static regset reg_pending_uses;
449 static regset reg_pending_control_uses;
450 static enum reg_pending_barrier_mode reg_pending_barrier;
 452 /* Hard registers implicitly clobbered or used (or that may be implicitly
 453 clobbered or used) by the currently analyzed insn. For example, an
 454 insn's constraint may allow only one register class. Even if no hard
 455 register currently appears in the insn, that particular hard register
 456 will appear in the insn after the reload pass because the constraint
 457 requires it. */
458 static HARD_REG_SET implicit_reg_pending_clobbers;
459 static HARD_REG_SET implicit_reg_pending_uses;
461 /* To speed up the test for duplicate dependency links we keep a
462 record of dependencies created by add_dependence when the average
463 number of instructions in a basic block is very large.
465 Studies have shown that there is typically around 5 instructions between
466 branches for typical C code. So we can make a guess that the average
467 basic block is approximately 5 instructions long; we will choose 100X
468 the average size as a very large basic block.
470 Each insn has associated bitmaps for its dependencies. Each bitmap
471 has enough entries to represent a dependency on any other insn in
 472 the insn chain. If the bitmap for the true-dependency cache is
 473 allocated, then the remaining caches are allocated as well. */
474 static bitmap_head *true_dependency_cache = NULL;
475 static bitmap_head *output_dependency_cache = NULL;
476 static bitmap_head *anti_dependency_cache = NULL;
477 static bitmap_head *control_dependency_cache = NULL;
478 static bitmap_head *spec_dependency_cache = NULL;
479 static int cache_size;
 481 /* True if we should mark added dependencies as non-register deps. */
482 static bool mark_as_hard;
484 static int deps_may_trap_p (const_rtx);
485 static void add_dependence_1 (rtx, rtx, enum reg_note);
486 static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
487 static void add_dependence_list_and_free (struct deps_desc *, rtx,
488 rtx *, int, enum reg_note, bool);
489 static void delete_all_dependences (rtx);
490 static void chain_to_prev_insn (rtx);
492 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
493 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
494 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
495 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
497 static bool sched_has_condition_p (const_rtx);
498 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
500 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
501 rtx, rtx);
502 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
504 #ifdef ENABLE_CHECKING
505 static void check_dep (dep_t, bool);
506 #endif
508 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
510 static int
511 deps_may_trap_p (const_rtx mem)
513 const_rtx addr = XEXP (mem, 0);
515 if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
517 const_rtx t = get_reg_known_value (REGNO (addr));
518 if (t)
519 addr = t;
521 return rtx_addr_can_trap_p (addr);
525 /* Find the condition under which INSN is executed. If REV is not NULL,
526 it is set to TRUE when the returned comparison should be reversed
527 to get the actual condition. */
528 static rtx
529 sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
531 rtx pat = PATTERN (insn);
532 rtx src;
534 if (rev)
535 *rev = false;
537 if (GET_CODE (pat) == COND_EXEC)
538 return COND_EXEC_TEST (pat);
540 if (!any_condjump_p (insn) || !onlyjump_p (insn))
541 return 0;
543 src = SET_SRC (pc_set (insn));
545 if (XEXP (src, 2) == pc_rtx)
546 return XEXP (src, 0);
547 else if (XEXP (src, 1) == pc_rtx)
549 rtx cond = XEXP (src, 0);
550 enum rtx_code revcode = reversed_comparison_code (cond, insn);
552 if (revcode == UNKNOWN)
553 return 0;
555 if (rev)
556 *rev = true;
557 return cond;
560 return 0;
563 /* Return the condition under which INSN does not execute (i.e. the
564 not-taken condition for a conditional branch), or NULL if we cannot
565 find such a condition. The caller should make a copy of the condition
566 before using it. */
 567 rtx
 568 sched_get_reverse_condition_uncached (const_rtx insn)
570 bool rev;
571 rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
572 if (cond == NULL_RTX)
573 return cond;
574 if (!rev)
576 enum rtx_code revcode = reversed_comparison_code (cond, insn);
577 cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
578 XEXP (cond, 0),
579 XEXP (cond, 1));
581 return cond;
584 /* Caching variant of sched_get_condition_with_rev_uncached.
585 We only do actual work the first time we come here for an insn; the
586 results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
587 static rtx
588 sched_get_condition_with_rev (const_rtx insn, bool *rev)
590 bool tmp;
592 if (INSN_LUID (insn) == 0)
593 return sched_get_condition_with_rev_uncached (insn, rev);
595 if (INSN_CACHED_COND (insn) == const_true_rtx)
596 return NULL_RTX;
598 if (INSN_CACHED_COND (insn) != NULL_RTX)
600 if (rev)
601 *rev = INSN_REVERSE_COND (insn);
602 return INSN_CACHED_COND (insn);
605 INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
606 INSN_REVERSE_COND (insn) = tmp;
608 if (INSN_CACHED_COND (insn) == NULL_RTX)
610 INSN_CACHED_COND (insn) = const_true_rtx;
611 return NULL_RTX;
614 if (rev)
615 *rev = INSN_REVERSE_COND (insn);
616 return INSN_CACHED_COND (insn);
619 /* True when we can find a condition under which INSN is executed. */
620 static bool
621 sched_has_condition_p (const_rtx insn)
623 return !! sched_get_condition_with_rev (insn, NULL);
628 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
629 static int
630 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
632 if (COMPARISON_P (cond1)
633 && COMPARISON_P (cond2)
634 && GET_CODE (cond1) ==
635 (rev1==rev2
636 ? reversed_comparison_code (cond2, NULL)
637 : GET_CODE (cond2))
638 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
639 && XEXP (cond1, 1) == XEXP (cond2, 1))
640 return 1;
641 return 0;
644 /* Return true if insn1 and insn2 can never depend on one another because
645 the conditions under which they are executed are mutually exclusive. */
646 bool
647 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
649 rtx cond1, cond2;
650 bool rev1 = false, rev2 = false;
652 /* df doesn't handle conditional lifetimes entirely correctly;
653 calls mess up the conditional lifetimes. */
654 if (!CALL_P (insn1) && !CALL_P (insn2))
656 cond1 = sched_get_condition_with_rev (insn1, &rev1);
657 cond2 = sched_get_condition_with_rev (insn2, &rev2);
658 if (cond1 && cond2
659 && conditions_mutex_p (cond1, cond2, rev1, rev2)
660 /* Make sure first instruction doesn't affect condition of second
661 instruction if switched. */
662 && !modified_in_p (cond1, insn2)
663 /* Make sure second instruction doesn't affect condition of first
664 instruction if switched. */
665 && !modified_in_p (cond2, insn1))
666 return true;
668 return false;
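/* Worked example (editorial, hypothetical insns): two COND_EXEC insns whose
   tests are EQ and NE of the same condition register against the same
   operand satisfy conditions_mutex_p, since EQ is the reversed comparison
   code of NE.  As long as neither insn modifies the other's condition,
   sched_insns_conditions_mutex_p reports that no dependence is needed
   between them.  */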
672 /* Return true if INSN can potentially be speculated with type DS. */
673 bool
674 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
676 if (HAS_INTERNAL_DEP (insn))
677 return false;
679 if (!NONJUMP_INSN_P (insn))
680 return false;
682 if (SCHED_GROUP_P (insn))
683 return false;
685 if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
686 return false;
688 if (side_effects_p (PATTERN (insn)))
689 return false;
691 if (ds & BE_IN_SPEC)
692 /* The following instructions, which depend on a speculatively scheduled
693 instruction, cannot be speculatively scheduled along. */
695 if (may_trap_or_fault_p (PATTERN (insn)))
696 /* If instruction might fault, it cannot be speculatively scheduled.
697 For control speculation it's obvious why and for data speculation
698 it's because the insn might get wrong input if speculation
699 wasn't successful. */
700 return false;
702 if ((ds & BE_IN_DATA)
703 && sched_has_condition_p (insn))
704 /* If this is a predicated instruction, then it cannot be
705 speculatively scheduled. See PR35659. */
706 return false;
709 return true;
712 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
713 initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
 714 and remove the type of the returned list [through LIST_PTR] from TYPES_PTR.
715 This function is used to switch sd_iterator to the next list.
716 !!! For internal use only. Might consider moving it to sched-int.h. */
717 void
718 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
719 deps_list_t *list_ptr, bool *resolved_p_ptr)
721 sd_list_types_def types = *types_ptr;
723 if (types & SD_LIST_HARD_BACK)
725 *list_ptr = INSN_HARD_BACK_DEPS (insn);
726 *resolved_p_ptr = false;
727 *types_ptr = types & ~SD_LIST_HARD_BACK;
729 else if (types & SD_LIST_SPEC_BACK)
731 *list_ptr = INSN_SPEC_BACK_DEPS (insn);
732 *resolved_p_ptr = false;
733 *types_ptr = types & ~SD_LIST_SPEC_BACK;
735 else if (types & SD_LIST_FORW)
737 *list_ptr = INSN_FORW_DEPS (insn);
738 *resolved_p_ptr = false;
739 *types_ptr = types & ~SD_LIST_FORW;
741 else if (types & SD_LIST_RES_BACK)
743 *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
744 *resolved_p_ptr = true;
745 *types_ptr = types & ~SD_LIST_RES_BACK;
747 else if (types & SD_LIST_RES_FORW)
749 *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
750 *resolved_p_ptr = true;
751 *types_ptr = types & ~SD_LIST_RES_FORW;
753 else
755 *list_ptr = NULL;
756 *resolved_p_ptr = false;
757 *types_ptr = SD_LIST_NONE;
 761 /* Return the combined size of INSN's lists defined by LIST_TYPES. */
 762 int
 763 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
765 int size = 0;
767 while (list_types != SD_LIST_NONE)
769 deps_list_t list;
770 bool resolved_p;
772 sd_next_list (insn, &list_types, &list, &resolved_p);
773 if (list)
774 size += DEPS_LIST_N_LINKS (list);
777 return size;
780 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
782 bool
783 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
785 while (list_types != SD_LIST_NONE)
787 deps_list_t list;
788 bool resolved_p;
790 sd_next_list (insn, &list_types, &list, &resolved_p);
791 if (!deps_list_empty_p (list))
792 return false;
795 return true;
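/* Typical use of the list-walking API above (an editorial sketch in the
   style of later code in this file, e.g. chain_to_prev_insn;
   handle_true_dep is a hypothetical helper):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
         handle_true_dep (DEP_PRO (dep));

   sd_lists_size and sd_lists_empty_p answer the same questions without
   materializing an iterator.  */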
798 /* Initialize data for INSN. */
799 void
800 sd_init_insn (rtx insn)
802 INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
803 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
804 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
805 INSN_FORW_DEPS (insn) = create_deps_list ();
806 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
808 /* ??? It would be nice to allocate dependency caches here. */
811 /* Free data for INSN. */
812 void
813 sd_finish_insn (rtx insn)
815 /* ??? It would be nice to deallocate dependency caches here. */
817 free_deps_list (INSN_HARD_BACK_DEPS (insn));
818 INSN_HARD_BACK_DEPS (insn) = NULL;
820 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
821 INSN_SPEC_BACK_DEPS (insn) = NULL;
823 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
824 INSN_RESOLVED_BACK_DEPS (insn) = NULL;
826 free_deps_list (INSN_FORW_DEPS (insn));
827 INSN_FORW_DEPS (insn) = NULL;
829 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
830 INSN_RESOLVED_FORW_DEPS (insn) = NULL;
833 /* Find a dependency between producer PRO and consumer CON.
834 Search through resolved dependency lists if RESOLVED_P is true.
835 If no such dependency is found return NULL,
836 otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
837 with an iterator pointing to it. */
838 static dep_t
839 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
840 sd_iterator_def *sd_it_ptr)
842 sd_list_types_def pro_list_type;
843 sd_list_types_def con_list_type;
844 sd_iterator_def sd_it;
845 dep_t dep;
846 bool found_p = false;
848 if (resolved_p)
850 pro_list_type = SD_LIST_RES_FORW;
851 con_list_type = SD_LIST_RES_BACK;
853 else
855 pro_list_type = SD_LIST_FORW;
856 con_list_type = SD_LIST_BACK;
859 /* Walk through either back list of INSN or forw list of ELEM
860 depending on which one is shorter. */
861 if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
863 /* Find the dep_link with producer PRO in consumer's back_deps. */
864 FOR_EACH_DEP (con, con_list_type, sd_it, dep)
865 if (DEP_PRO (dep) == pro)
867 found_p = true;
868 break;
871 else
873 /* Find the dep_link with consumer CON in producer's forw_deps. */
874 FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
875 if (DEP_CON (dep) == con)
877 found_p = true;
878 break;
882 if (found_p)
884 if (sd_it_ptr != NULL)
885 *sd_it_ptr = sd_it;
887 return dep;
890 return NULL;
893 /* Find a dependency between producer PRO and consumer CON.
 894 Use the dependency caches [if available] to check whether the dependency
 895 is present at all. Search through resolved dependency lists if RESOLVED_P
 896 is true. Return the dependency, or NULL if none is found. */
897 dep_t
898 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
900 if (true_dependency_cache != NULL)
901 /* Avoiding the list walk below can cut compile times dramatically
902 for some code. */
904 int elem_luid = INSN_LUID (pro);
905 int insn_luid = INSN_LUID (con);
907 if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
908 && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
909 && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
910 && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
911 return NULL;
914 return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
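/* Usage sketch (editorial example, not from the original file): a pass that
   wants to know whether CON already depends on PRO can do

     dep_t d = sd_find_dep_between (pro, con, false);
     if (d != NULL && DEP_TYPE (d) == REG_DEP_TRUE)
       ...;

   Thanks to the luid-indexed bitmap caches above, the common "no dependence"
   case is answered without walking any list.  */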
917 /* Add or update a dependence described by DEP.
918 MEM1 and MEM2, if non-null, correspond to memory locations in case of
919 data speculation.
921 The function returns a value indicating if an old entry has been changed
922 or a new entry has been added to insn's backward deps.
 924 This function merely checks whether the producer and the consumer are the same insn
925 and doesn't create a dep in this case. Actual manipulation of
926 dependence data structures is performed in add_or_update_dep_1. */
927 static enum DEPS_ADJUST_RESULT
928 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
930 rtx elem = DEP_PRO (dep);
931 rtx insn = DEP_CON (dep);
933 gcc_assert (INSN_P (insn) && INSN_P (elem));
935 /* Don't depend an insn on itself. */
936 if (insn == elem)
938 if (sched_deps_info->generate_spec_deps)
939 /* INSN has an internal dependence, which we can't overcome. */
940 HAS_INTERNAL_DEP (insn) = 1;
942 return DEP_NODEP;
945 return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
948 /* Ask dependency caches what needs to be done for dependence DEP.
949 Return DEP_CREATED if new dependence should be created and there is no
950 need to try to find one searching the dependencies lists.
951 Return DEP_PRESENT if there already is a dependence described by DEP and
952 hence nothing is to be done.
953 Return DEP_CHANGED if there already is a dependence, but it should be
954 updated to incorporate additional information from DEP. */
955 static enum DEPS_ADJUST_RESULT
956 ask_dependency_caches (dep_t dep)
958 int elem_luid = INSN_LUID (DEP_PRO (dep));
959 int insn_luid = INSN_LUID (DEP_CON (dep));
961 gcc_assert (true_dependency_cache != NULL
962 && output_dependency_cache != NULL
963 && anti_dependency_cache != NULL
964 && control_dependency_cache != NULL);
966 if (!(current_sched_info->flags & USE_DEPS_LIST))
968 enum reg_note present_dep_type;
970 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
971 present_dep_type = REG_DEP_TRUE;
972 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
973 present_dep_type = REG_DEP_OUTPUT;
974 else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
975 present_dep_type = REG_DEP_ANTI;
976 else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
977 present_dep_type = REG_DEP_CONTROL;
978 else
979 /* There is no existing dep so it should be created. */
980 return DEP_CREATED;
982 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
983 /* DEP does not add anything to the existing dependence. */
984 return DEP_PRESENT;
986 else
988 ds_t present_dep_types = 0;
990 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
991 present_dep_types |= DEP_TRUE;
992 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
993 present_dep_types |= DEP_OUTPUT;
994 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
995 present_dep_types |= DEP_ANTI;
996 if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
997 present_dep_types |= DEP_CONTROL;
999 if (present_dep_types == 0)
1000 /* There is no existing dep so it should be created. */
1001 return DEP_CREATED;
1003 if (!(current_sched_info->flags & DO_SPECULATION)
1004 || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1006 if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1007 == present_dep_types)
1008 /* DEP does not add anything to the existing dependence. */
1009 return DEP_PRESENT;
1011 else
1013 /* Only true dependencies can be data speculative and
1014 only anti dependencies can be control speculative. */
1015 gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1016 == present_dep_types);
1018 /* if (DEP is SPECULATIVE) then
1019 ..we should update DEP_STATUS
1020 else
1021 ..we should reset existing dep to non-speculative. */
1025 return DEP_CHANGED;
1028 /* Set dependency caches according to DEP. */
1029 static void
1030 set_dependency_caches (dep_t dep)
1032 int elem_luid = INSN_LUID (DEP_PRO (dep));
1033 int insn_luid = INSN_LUID (DEP_CON (dep));
1035 if (!(current_sched_info->flags & USE_DEPS_LIST))
1037 switch (DEP_TYPE (dep))
1039 case REG_DEP_TRUE:
1040 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1041 break;
1043 case REG_DEP_OUTPUT:
1044 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1045 break;
1047 case REG_DEP_ANTI:
1048 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1049 break;
1051 case REG_DEP_CONTROL:
1052 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1053 break;
1055 default:
1056 gcc_unreachable ();
1059 else
1061 ds_t ds = DEP_STATUS (dep);
1063 if (ds & DEP_TRUE)
1064 bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1065 if (ds & DEP_OUTPUT)
1066 bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1067 if (ds & DEP_ANTI)
1068 bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1069 if (ds & DEP_CONTROL)
1070 bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1072 if (ds & SPECULATIVE)
1074 gcc_assert (current_sched_info->flags & DO_SPECULATION);
1075 bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
 1080 /* The type of dependence DEP has changed from OLD_TYPE. Update dependency
1081 caches accordingly. */
1082 static void
1083 update_dependency_caches (dep_t dep, enum reg_note old_type)
1085 int elem_luid = INSN_LUID (DEP_PRO (dep));
1086 int insn_luid = INSN_LUID (DEP_CON (dep));
1088 /* Clear corresponding cache entry because type of the link
1089 may have changed. Keep them if we use_deps_list. */
1090 if (!(current_sched_info->flags & USE_DEPS_LIST))
1092 switch (old_type)
1094 case REG_DEP_OUTPUT:
1095 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1096 break;
1098 case REG_DEP_ANTI:
1099 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1100 break;
1102 case REG_DEP_CONTROL:
1103 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1104 break;
1106 default:
1107 gcc_unreachable ();
1111 set_dependency_caches (dep);
1114 /* Convert a dependence pointed to by SD_IT to be non-speculative. */
1115 static void
1116 change_spec_dep_to_hard (sd_iterator_def sd_it)
1118 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1119 dep_link_t link = DEP_NODE_BACK (node);
1120 dep_t dep = DEP_NODE_DEP (node);
1121 rtx elem = DEP_PRO (dep);
1122 rtx insn = DEP_CON (dep);
1124 move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1126 DEP_STATUS (dep) &= ~SPECULATIVE;
1128 if (true_dependency_cache != NULL)
1129 /* Clear the cache entry. */
1130 bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1131 INSN_LUID (elem));
1134 /* Update DEP to incorporate information from NEW_DEP.
1135 SD_IT points to DEP in case it should be moved to another list.
 1136 MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1137 data-speculative dependence should be updated. */
1138 static enum DEPS_ADJUST_RESULT
1139 update_dep (dep_t dep, dep_t new_dep,
1140 sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1141 rtx mem1 ATTRIBUTE_UNUSED,
1142 rtx mem2 ATTRIBUTE_UNUSED)
1144 enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1145 enum reg_note old_type = DEP_TYPE (dep);
1146 bool was_spec = dep_spec_p (dep);
1148 DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1149 DEP_MULTIPLE (dep) = 1;
1151 /* If this is a more restrictive type of dependence than the
1152 existing one, then change the existing dependence to this
1153 type. */
1154 if ((int) DEP_TYPE (new_dep) < (int) old_type)
1156 DEP_TYPE (dep) = DEP_TYPE (new_dep);
1157 res = DEP_CHANGED;
1160 if (current_sched_info->flags & USE_DEPS_LIST)
1161 /* Update DEP_STATUS. */
1163 ds_t dep_status = DEP_STATUS (dep);
1164 ds_t ds = DEP_STATUS (new_dep);
1165 ds_t new_status = ds | dep_status;
1167 if (new_status & SPECULATIVE)
1169 /* Either existing dep or a dep we're adding or both are
1170 speculative. */
1171 if (!(ds & SPECULATIVE)
1172 || !(dep_status & SPECULATIVE))
1173 /* The new dep can't be speculative. */
1174 new_status &= ~SPECULATIVE;
1175 else
1177 /* Both are speculative. Merge probabilities. */
1178 if (mem1 != NULL)
1180 dw_t dw;
1182 dw = estimate_dep_weak (mem1, mem2);
1183 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1186 new_status = ds_merge (dep_status, ds);
1190 ds = new_status;
1192 if (dep_status != ds)
1194 DEP_STATUS (dep) = ds;
1195 res = DEP_CHANGED;
1199 if (was_spec && !dep_spec_p (dep))
1200 /* The old dep was speculative, but now it isn't. */
1201 change_spec_dep_to_hard (sd_it);
1203 if (true_dependency_cache != NULL
1204 && res == DEP_CHANGED)
1205 update_dependency_caches (dep, old_type);
1207 return res;
1210 /* Add or update a dependence described by DEP.
1211 MEM1 and MEM2, if non-null, correspond to memory locations in case of
1212 data speculation.
1214 The function returns a value indicating if an old entry has been changed
1215 or a new entry has been added to insn's backward deps or nothing has
1216 been updated at all. */
1217 static enum DEPS_ADJUST_RESULT
1218 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1219 rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1221 bool maybe_present_p = true;
1222 bool present_p = false;
1224 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1225 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1227 #ifdef ENABLE_CHECKING
1228 check_dep (new_dep, mem1 != NULL);
1229 #endif
1231 if (true_dependency_cache != NULL)
1233 switch (ask_dependency_caches (new_dep))
1235 case DEP_PRESENT:
1236 dep_t present_dep;
1237 sd_iterator_def sd_it;
1239 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1240 DEP_CON (new_dep),
1241 resolved_p, &sd_it);
1242 DEP_MULTIPLE (present_dep) = 1;
1243 return DEP_PRESENT;
1245 case DEP_CHANGED:
1246 maybe_present_p = true;
1247 present_p = true;
1248 break;
1250 case DEP_CREATED:
1251 maybe_present_p = false;
1252 present_p = false;
1253 break;
1255 default:
1256 gcc_unreachable ();
1257 break;
1261 /* Check that we don't already have this dependence. */
1262 if (maybe_present_p)
1264 dep_t present_dep;
1265 sd_iterator_def sd_it;
1267 gcc_assert (true_dependency_cache == NULL || present_p);
1269 present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1270 DEP_CON (new_dep),
1271 resolved_p, &sd_it);
1273 if (present_dep != NULL)
1274 /* We found an existing dependency between ELEM and INSN. */
1275 return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1276 else
 1277 /* We didn't find a dep, so it shouldn't be present in the cache. */
1278 gcc_assert (!present_p);
1281 /* Might want to check one level of transitivity to save conses.
1282 This check should be done in maybe_add_or_update_dep_1.
1283 Since we made it to add_or_update_dep_1, we must create
1284 (or update) a link. */
1286 if (mem1 != NULL_RTX)
1288 gcc_assert (sched_deps_info->generate_spec_deps);
1289 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1290 estimate_dep_weak (mem1, mem2));
1293 sd_add_dep (new_dep, resolved_p);
1295 return DEP_CREATED;
1298 /* Initialize BACK_LIST_PTR with consumer's backward list and
1299 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1300 initialize with lists that hold resolved deps. */
1301 static void
1302 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1303 deps_list_t *back_list_ptr,
1304 deps_list_t *forw_list_ptr)
1306 rtx con = DEP_CON (dep);
1308 if (!resolved_p)
1310 if (dep_spec_p (dep))
1311 *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1312 else
1313 *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1315 *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1317 else
1319 *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1320 *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1324 /* Add dependence described by DEP.
1325 If RESOLVED_P is true treat the dependence as a resolved one. */
1326 void
1327 sd_add_dep (dep_t dep, bool resolved_p)
1329 dep_node_t n = create_dep_node ();
1330 deps_list_t con_back_deps;
1331 deps_list_t pro_forw_deps;
1332 rtx elem = DEP_PRO (dep);
1333 rtx insn = DEP_CON (dep);
1335 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1337 if ((current_sched_info->flags & DO_SPECULATION) == 0
1338 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1339 DEP_STATUS (dep) &= ~SPECULATIVE;
1341 copy_dep (DEP_NODE_DEP (n), dep);
1343 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1345 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1347 #ifdef ENABLE_CHECKING
1348 check_dep (dep, false);
1349 #endif
1351 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1353 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1354 in the bitmap caches of dependency information. */
1355 if (true_dependency_cache != NULL)
1356 set_dependency_caches (dep);
 1359 /* Add or update a backward dependence described by DEP, searching the
 1360 resolved lists if RESOLVED_P is true.
 1361 This function is a convenience wrapper around add_or_update_dep_1. */
1362 enum DEPS_ADJUST_RESULT
1363 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1365 return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
 1368 /* Resolve the dependence pointed to by SD_IT.
1369 SD_IT will advance to the next element. */
1370 void
1371 sd_resolve_dep (sd_iterator_def sd_it)
1373 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1374 dep_t dep = DEP_NODE_DEP (node);
1375 rtx pro = DEP_PRO (dep);
1376 rtx con = DEP_CON (dep);
1378 if (dep_spec_p (dep))
1379 move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1380 INSN_RESOLVED_BACK_DEPS (con));
1381 else
1382 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1383 INSN_RESOLVED_BACK_DEPS (con));
1385 move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1386 INSN_RESOLVED_FORW_DEPS (pro));
1389 /* Perform the inverse operation of sd_resolve_dep. Restore the dependence
1390 pointed to by SD_IT to unresolved state. */
1391 void
1392 sd_unresolve_dep (sd_iterator_def sd_it)
1394 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1395 dep_t dep = DEP_NODE_DEP (node);
1396 rtx pro = DEP_PRO (dep);
1397 rtx con = DEP_CON (dep);
1399 if (dep_spec_p (dep))
1400 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1401 INSN_SPEC_BACK_DEPS (con));
1402 else
1403 move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1404 INSN_HARD_BACK_DEPS (con));
1406 move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1407 INSN_FORW_DEPS (pro));
1410 /* Make TO depend on all the FROM's producers.
1411 If RESOLVED_P is true add dependencies to the resolved lists. */
1412 void
1413 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1415 sd_list_types_def list_type;
1416 sd_iterator_def sd_it;
1417 dep_t dep;
1419 list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1421 FOR_EACH_DEP (from, list_type, sd_it, dep)
1423 dep_def _new_dep, *new_dep = &_new_dep;
1425 copy_dep (new_dep, dep);
1426 DEP_CON (new_dep) = to;
1427 sd_add_dep (new_dep, resolved_p);
1431 /* Remove a dependency referred to by SD_IT.
1432 SD_IT will point to the next dependence after removal. */
1433 void
1434 sd_delete_dep (sd_iterator_def sd_it)
1436 dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1437 dep_t dep = DEP_NODE_DEP (n);
1438 rtx pro = DEP_PRO (dep);
1439 rtx con = DEP_CON (dep);
1440 deps_list_t con_back_deps;
1441 deps_list_t pro_forw_deps;
1443 if (true_dependency_cache != NULL)
1445 int elem_luid = INSN_LUID (pro);
1446 int insn_luid = INSN_LUID (con);
1448 bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1449 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1450 bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1451 bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1453 if (current_sched_info->flags & DO_SPECULATION)
1454 bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1457 get_back_and_forw_lists (dep, sd_it.resolved_p,
1458 &con_back_deps, &pro_forw_deps);
1460 remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1461 remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1463 delete_dep_node (n);
1466 /* Dump size of the lists. */
1467 #define DUMP_LISTS_SIZE (2)
1469 /* Dump dependencies of the lists. */
1470 #define DUMP_LISTS_DEPS (4)
1472 /* Dump all information about the lists. */
1473 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1475 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1476 FLAGS is a bit mask specifying what information about the lists needs
1477 to be printed.
1478 If FLAGS has the very first bit set, then dump all information about
1479 the lists and propagate this bit into the callee dump functions. */
1480 static void
1481 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1483 sd_iterator_def sd_it;
1484 dep_t dep;
1485 int all;
1487 all = (flags & 1);
1489 if (all)
1490 flags |= DUMP_LISTS_ALL;
1492 fprintf (dump, "[");
1494 if (flags & DUMP_LISTS_SIZE)
1495 fprintf (dump, "%d; ", sd_lists_size (insn, types));
1497 if (flags & DUMP_LISTS_DEPS)
1499 FOR_EACH_DEP (insn, types, sd_it, dep)
1501 dump_dep (dump, dep, dump_dep_flags | all);
1502 fprintf (dump, " ");
1507 /* Dump all information about deps_lists of INSN specified by TYPES
1508 to STDERR. */
1509 void
1510 sd_debug_lists (rtx insn, sd_list_types_def types)
1512 dump_lists (stderr, insn, types, 1);
1513 fprintf (stderr, "\n");
1516 /* A wrapper around add_dependence_1, to add a dependence of CON on
1517 PRO, with type DEP_TYPE. This function implements special handling
1518 for REG_DEP_CONTROL dependencies. For these, we optionally promote
1519 the type to REG_DEP_ANTI if we can determine that predication is
1520 impossible; otherwise we add additional true dependencies on the
1521 INSN_COND_DEPS list of the jump (which PRO must be). */
1522 void
1523 add_dependence (rtx con, rtx pro, enum reg_note dep_type)
1525 if (dep_type == REG_DEP_CONTROL
1526 && !(current_sched_info->flags & DO_PREDICATION))
1527 dep_type = REG_DEP_ANTI;
1529 /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1530 so we must also make the insn dependent on the setter of the
1531 condition. */
1532 if (dep_type == REG_DEP_CONTROL)
1534 rtx real_pro = pro;
1535 rtx other = real_insn_for_shadow (real_pro);
1536 rtx cond;
1538 if (other != NULL_RTX)
1539 real_pro = other;
1540 cond = sched_get_reverse_condition_uncached (real_pro);
1541 /* Verify that the insn does not use a different value in
1542 the condition register than the one that was present at
1543 the jump. */
1544 if (cond == NULL_RTX)
1545 dep_type = REG_DEP_ANTI;
1546 else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1548 HARD_REG_SET uses;
1549 CLEAR_HARD_REG_SET (uses);
1550 note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1551 if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1552 dep_type = REG_DEP_ANTI;
1554 if (dep_type == REG_DEP_CONTROL)
1556 if (sched_verbose >= 5)
1557 fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1558 INSN_UID (real_pro));
1559 add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1560 REG_DEP_TRUE, false);
1564 add_dependence_1 (con, pro, dep_type);
1567 /* A convenience wrapper to operate on an entire list. HARD should be
1568 true if DEP_NONREG should be set on newly created dependencies. */
1570 static void
1571 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
1572 bool hard)
1574 mark_as_hard = hard;
1575 for (; list; list = XEXP (list, 1))
1577 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1578 add_dependence (insn, XEXP (list, 0), dep_type);
1580 mark_as_hard = false;
1583 /* Similar, but free *LISTP at the same time, when the context
1584 is not readonly. HARD should be true if DEP_NONREG should be set on
1585 newly created dependencies. */
1587 static void
1588 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1589 int uncond, enum reg_note dep_type, bool hard)
1591 add_dependence_list (insn, *listp, uncond, dep_type, hard);
1593 /* We don't want to short-circuit dependencies involving debug
1594 insns, because they may cause actual dependencies to be
1595 disregarded. */
1596 if (deps->readonly || DEBUG_INSN_P (insn))
1597 return;
1599 free_INSN_LIST_list (listp);
1602 /* Remove all occurrences of INSN from LIST. Return the number of
1603 occurrences removed. */
1605 static int
1606 remove_from_dependence_list (rtx insn, rtx* listp)
1608 int removed = 0;
1610 while (*listp)
1612 if (XEXP (*listp, 0) == insn)
1614 remove_free_INSN_LIST_node (listp);
1615 removed++;
1616 continue;
1619 listp = &XEXP (*listp, 1);
1622 return removed;
1625 /* Same as above, but process two lists at once. */
1626 static int
1627 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1629 int removed = 0;
1631 while (*listp)
1633 if (XEXP (*listp, 0) == insn)
1635 remove_free_INSN_LIST_node (listp);
1636 remove_free_EXPR_LIST_node (exprp);
1637 removed++;
1638 continue;
1641 listp = &XEXP (*listp, 1);
1642 exprp = &XEXP (*exprp, 1);
1645 return removed;
1648 /* Clear all dependencies for an insn. */
1649 static void
1650 delete_all_dependences (rtx insn)
1652 sd_iterator_def sd_it;
1653 dep_t dep;
 1655 /* The loop below could be optimized to clear the caches and back_deps
 1656 in one call, but that would duplicate code from
 1657 sd_delete_dep (). */
1659 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1660 sd_iterator_cond (&sd_it, &dep);)
1661 sd_delete_dep (sd_it);
1664 /* All insns in a scheduling group except the first should only have
1665 dependencies on the previous insn in the group. So we find the
1666 first instruction in the scheduling group by walking the dependence
1667 chains backwards. Then we add the dependencies for the group to
1668 the previous nonnote insn. */
1670 static void
1671 chain_to_prev_insn (rtx insn)
1673 sd_iterator_def sd_it;
1674 dep_t dep;
1675 rtx prev_nonnote;
1677 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1679 rtx i = insn;
1680 rtx pro = DEP_PRO (dep);
1684 i = prev_nonnote_insn (i);
1686 if (pro == i)
1687 goto next_link;
1688 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1690 if (! sched_insns_conditions_mutex_p (i, pro))
1691 add_dependence (i, pro, DEP_TYPE (dep));
1692 next_link:;
1695 delete_all_dependences (insn);
1697 prev_nonnote = prev_nonnote_nondebug_insn (insn);
1698 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1699 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1700 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1703 /* Process an insn's memory dependencies. There are four kinds of
1704 dependencies:
1706 (0) read dependence: read follows read
1707 (1) true dependence: read follows write
1708 (2) output dependence: write follows write
1709 (3) anti dependence: write follows read
1711 We are careful to build only dependencies which actually exist, and
1712 use transitivity to avoid building too many links. */
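/* Concrete examples of the four kinds (editorial note, hypothetical insns):

     (0) read dependence:    load [r1];  load [r1]   (needs no ordering)
     (1) true dependence:    store [r1]; load [r1]
     (2) output dependence:  store [r1]; store [r1]
     (3) anti dependence:    load [r1];  store [r1]  */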
1714 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1715 The MEM is a memory reference contained within INSN, which we are saving
1716 so that we can do memory aliasing on it. */
1718 static void
1719 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1720 rtx insn, rtx mem)
1722 rtx *insn_list;
1723 rtx *mem_list;
1724 rtx link;
1726 gcc_assert (!deps->readonly);
1727 if (read_p)
1729 insn_list = &deps->pending_read_insns;
1730 mem_list = &deps->pending_read_mems;
1731 if (!DEBUG_INSN_P (insn))
1732 deps->pending_read_list_length++;
1734 else
1736 insn_list = &deps->pending_write_insns;
1737 mem_list = &deps->pending_write_mems;
1738 deps->pending_write_list_length++;
1741 link = alloc_INSN_LIST (insn, *insn_list);
1742 *insn_list = link;
1744 if (sched_deps_info->use_cselib)
1746 mem = shallow_copy_rtx (mem);
1747 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1748 GET_MODE (mem), insn);
1750 link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1751 *mem_list = link;
1754 /* Make a dependency between every memory reference on the pending lists
1755 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1756 dependencies for a read operation, similarly with FOR_WRITE. */
1758 static void
1759 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1760 int for_write)
1762 if (for_write)
1764 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1765 1, REG_DEP_ANTI, true);
1766 if (!deps->readonly)
1768 free_EXPR_LIST_list (&deps->pending_read_mems);
1769 deps->pending_read_list_length = 0;
1773 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1774 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1775 true);
1777 add_dependence_list_and_free (deps, insn,
1778 &deps->last_pending_memory_flush, 1,
1779 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1780 true);
1782 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1783 REG_DEP_ANTI, true);
1785 if (DEBUG_INSN_P (insn))
1787 if (for_write)
1788 free_INSN_LIST_list (&deps->pending_read_insns);
1789 free_INSN_LIST_list (&deps->pending_write_insns);
1790 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1791 free_INSN_LIST_list (&deps->pending_jump_insns);
1794 if (!deps->readonly)
1796 free_EXPR_LIST_list (&deps->pending_write_mems);
1797 deps->pending_write_list_length = 0;
1799 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1800 deps->pending_flush_length = 1;
1802 mark_as_hard = false;
 1805 /* Instruction whose dependencies we are analyzing. */
1806 static rtx cur_insn = NULL_RTX;
1808 /* Implement hooks for haifa scheduler. */
1810 static void
1811 haifa_start_insn (rtx insn)
1813 gcc_assert (insn && !cur_insn);
1815 cur_insn = insn;
1818 static void
1819 haifa_finish_insn (void)
1821 cur_insn = NULL;
1824 void
1825 haifa_note_reg_set (int regno)
1827 SET_REGNO_REG_SET (reg_pending_sets, regno);
1830 void
1831 haifa_note_reg_clobber (int regno)
1833 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1836 void
1837 haifa_note_reg_use (int regno)
1839 SET_REGNO_REG_SET (reg_pending_uses, regno);
1842 static void
1843 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1845 if (!(ds & SPECULATIVE))
1847 mem = NULL_RTX;
1848 pending_mem = NULL_RTX;
1850 else
1851 gcc_assert (ds & BEGIN_DATA);
1854 dep_def _dep, *dep = &_dep;
1856 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1857 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1858 DEP_NONREG (dep) = 1;
1859 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1864 static void
1865 haifa_note_dep (rtx elem, ds_t ds)
1867 dep_def _dep;
1868 dep_t dep = &_dep;
1870 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1871 if (mark_as_hard)
1872 DEP_NONREG (dep) = 1;
1873 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1876 static void
1877 note_reg_use (int r)
1879 if (sched_deps_info->note_reg_use)
1880 sched_deps_info->note_reg_use (r);
1883 static void
1884 note_reg_set (int r)
1886 if (sched_deps_info->note_reg_set)
1887 sched_deps_info->note_reg_set (r);
1890 static void
1891 note_reg_clobber (int r)
1893 if (sched_deps_info->note_reg_clobber)
1894 sched_deps_info->note_reg_clobber (r);
1897 static void
1898 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1900 if (sched_deps_info->note_mem_dep)
1901 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1904 static void
1905 note_dep (rtx e, ds_t ds)
1907 if (sched_deps_info->note_dep)
1908 sched_deps_info->note_dep (e, ds);
 1911 /* Return the reg_note corresponding to DS. */
1912 enum reg_note
1913 ds_to_dt (ds_t ds)
1915 if (ds & DEP_TRUE)
1916 return REG_DEP_TRUE;
1917 else if (ds & DEP_OUTPUT)
1918 return REG_DEP_OUTPUT;
1919 else if (ds & DEP_ANTI)
1920 return REG_DEP_ANTI;
1921 else
1923 gcc_assert (ds & DEP_CONTROL);
1924 return REG_DEP_CONTROL;
1930 /* Functions for computation of info needed for register pressure
1931 sensitive insn scheduling. */
1934 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1935 static struct reg_use_data *
1936 create_insn_reg_use (int regno, rtx insn)
1938 struct reg_use_data *use;
1940 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1941 use->regno = regno;
1942 use->insn = insn;
1943 use->next_insn_use = INSN_REG_USE_LIST (insn);
1944 INSN_REG_USE_LIST (insn) = use;
1945 return use;
1948 /* Allocate reg_set_data structure for REGNO and INSN. */
1949 static void
1950 create_insn_reg_set (int regno, rtx insn)
1952 struct reg_set_data *set;
1954 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1955 set->regno = regno;
1956 set->insn = insn;
1957 set->next_insn_set = INSN_REG_SET_LIST (insn);
1958 INSN_REG_SET_LIST (insn) = set;
1961 /* Set up insn register uses for INSN and dependency context DEPS. */
1962 static void
1963 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1965 unsigned i;
1966 reg_set_iterator rsi;
1967 rtx list;
1968 struct reg_use_data *use, *use2, *next;
1969 struct deps_reg *reg_last;
1971 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1973 if (i < FIRST_PSEUDO_REGISTER
1974 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1975 continue;
1977 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1978 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1979 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1980 /* Ignore use which is not dying. */
1981 continue;
1983 use = create_insn_reg_use (i, insn);
1984 use->next_regno_use = use;
1985 reg_last = &deps->reg_last[i];
 1987 /* Create the cyclic list of uses. */
1988 for (list = reg_last->uses; list; list = XEXP (list, 1))
1990 use2 = create_insn_reg_use (i, XEXP (list, 0));
1991 next = use->next_regno_use;
1992 use->next_regno_use = use2;
1993 use2->next_regno_use = next;
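/* Editorial note: the loop above links every use of register I, in INSN and
   in the insns recorded in reg_last->uses, into one circular list through
   the next_regno_use field, so starting from any reg_use_data one can visit
   all uses of that regno.  */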
1998 /* Register pressure info for the currently processed insn. */
1999 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2001 /* Return TRUE if INSN has the use structure for REGNO. */
2002 static bool
2003 insn_use_p (rtx insn, int regno)
2005 struct reg_use_data *use;
2007 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2008 if (use->regno == regno)
2009 return true;
2010 return false;
2013 /* Update the register pressure info after birth of pseudo register REGNO
 2014 in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively, whether
 2015 the register is clobbered or unused after the insn. */
2016 static void
2017 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2019 int incr, new_incr;
2020 enum reg_class cl;
2022 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2023 cl = sched_regno_pressure_class[regno];
2024 if (cl != NO_REGS)
2026 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2027 if (clobber_p)
2029 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2030 reg_pressure_info[cl].clobber_increase = new_incr;
2032 else if (unused_p)
2034 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2035 reg_pressure_info[cl].unused_set_increase = new_incr;
2037 else
2039 new_incr = reg_pressure_info[cl].set_increase + incr;
2040 reg_pressure_info[cl].set_increase = new_incr;
2041 if (! insn_use_p (insn, regno))
2042 reg_pressure_info[cl].change += incr;
2043 create_insn_reg_set (regno, insn);
2045 gcc_assert (new_incr < (1 << INCREASE_BITS));
 2049 /* Like mark_insn_pseudo_birth except that NREGS says how many
 2050 hard registers are involved in the birth. */
2051 static void
2052 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2053 bool clobber_p, bool unused_p)
2055 enum reg_class cl;
2056 int new_incr, last = regno + nregs;
2058 while (regno < last)
2060 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2061 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2063 cl = sched_regno_pressure_class[regno];
2064 if (cl != NO_REGS)
2066 if (clobber_p)
2068 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2069 reg_pressure_info[cl].clobber_increase = new_incr;
2071 else if (unused_p)
2073 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2074 reg_pressure_info[cl].unused_set_increase = new_incr;
2076 else
2078 new_incr = reg_pressure_info[cl].set_increase + 1;
2079 reg_pressure_info[cl].set_increase = new_incr;
2080 if (! insn_use_p (insn, regno))
2081 reg_pressure_info[cl].change += 1;
2082 create_insn_reg_set (regno, insn);
2084 gcc_assert (new_incr < (1 << INCREASE_BITS));
2087 regno++;
2091 /* Update the register pressure info after birth of pseudo or hard
 2092 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say,
 2093 respectively, whether the register is clobbered or unused after the
 2094 insn. */
2095 static void
2096 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2098 int regno;
2100 if (GET_CODE (reg) == SUBREG)
2101 reg = SUBREG_REG (reg);
2103 if (! REG_P (reg))
2104 return;
2106 regno = REGNO (reg);
2107 if (regno < FIRST_PSEUDO_REGISTER)
2108 mark_insn_hard_regno_birth (insn, regno,
2109 hard_regno_nregs[regno][GET_MODE (reg)],
2110 clobber_p, unused_p);
2111 else
2112 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2115 /* Update the register pressure info after death of pseudo register
2116 REGNO. */
2117 static void
2118 mark_pseudo_death (int regno)
2120 int incr;
2121 enum reg_class cl;
2123 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2124 cl = sched_regno_pressure_class[regno];
2125 if (cl != NO_REGS)
2127 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2128 reg_pressure_info[cl].change -= incr;
2132 /* Like mark_pseudo_death, except that NREGS says how many hard
2133 registers are involved in the death. */
2134 static void
2135 mark_hard_regno_death (int regno, int nregs)
2137 enum reg_class cl;
2138 int last = regno + nregs;
2140 while (regno < last)
2142 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2143 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2145 cl = sched_regno_pressure_class[regno];
2146 if (cl != NO_REGS)
2147 reg_pressure_info[cl].change -= 1;
2149 regno++;
2153 /* Update the register pressure info after death of pseudo or hard
2154 register REG. */
2155 static void
2156 mark_reg_death (rtx reg)
2158 int regno;
2160 if (GET_CODE (reg) == SUBREG)
2161 reg = SUBREG_REG (reg);
2163 if (! REG_P (reg))
2164 return;
2166 regno = REGNO (reg);
2167 if (regno < FIRST_PSEUDO_REGISTER)
2168 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2169 else
2170 mark_pseudo_death (regno);
2173 /* Process SETTER of REG. DATA is an insn containing the setter. */
2174 static void
2175 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2177 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2178 return;
2179 mark_insn_reg_birth
2180 ((rtx) data, reg, false,
2181 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2184 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2185 static void
2186 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2188 if (GET_CODE (setter) == CLOBBER)
2189 mark_insn_reg_birth ((rtx) data, reg, true, false);
2192 /* Set up reg pressure info related to INSN. */
2193 void
2194 init_insn_reg_pressure_info (rtx insn)
2196 int i, len;
2197 enum reg_class cl;
2198 static struct reg_pressure_data *pressure_info;
2199 rtx link;
2201 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2203 if (! INSN_P (insn))
2204 return;
2206 for (i = 0; i < ira_pressure_classes_num; i++)
2208 cl = ira_pressure_classes[i];
2209 reg_pressure_info[cl].clobber_increase = 0;
2210 reg_pressure_info[cl].set_increase = 0;
2211 reg_pressure_info[cl].unused_set_increase = 0;
2212 reg_pressure_info[cl].change = 0;
2215 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2217 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2219 #ifdef AUTO_INC_DEC
2220 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2221 if (REG_NOTE_KIND (link) == REG_INC)
2222 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2223 #endif
2225 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2226 if (REG_NOTE_KIND (link) == REG_DEAD)
2227 mark_reg_death (XEXP (link, 0));
2229 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2230 pressure_info
2231 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2232 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2233 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2234 * sizeof (int), 1);
2235 for (i = 0; i < ira_pressure_classes_num; i++)
2237 cl = ira_pressure_classes[i];
2238 pressure_info[i].clobber_increase
2239 = reg_pressure_info[cl].clobber_increase;
2240 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2241 pressure_info[i].unused_set_increase
2242 = reg_pressure_info[cl].unused_set_increase;
2243 pressure_info[i].change = reg_pressure_info[cl].change;
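/* init_insn_reg_pressure_info above first records the births from the insn's
   pattern (clobbers first, then stores), then subtracts the deaths found in
   REG_DEAD notes, and finally copies the per-class counters into a compact
   per-insn array.  A tiny worked example under the same kind of simplified
   single-class model as the sketch further up; the insn "r1 = r2 + r3" with
   r2 dying is hypothetical.  */
#if 0  /* Illustrative worked example only; hypothetical single-class model.  */
#include <assert.h>

int
main (void)
{
  /* Model an insn "r1 = r2 + r3" in which r2 dies and all three pseudos
     belong to the same pressure class.  */
  int set_increase = 0, change = 0;

  /* Birth of r1: an ordinary set whose value the insn itself does not use.  */
  set_increase += 1;
  change += 1;

  /* Death of r2 (REG_DEAD note): the class loses one live register.  */
  change -= 1;

  /* Net effect: one value is created, one dies, pressure stays flat.  */
  assert (set_increase == 1 && change == 0);
  return 0;
}
#endif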
2250 /* Internal variable for the sched_analyze_[12] () functions.
2251 If it is nonzero, it means that sched_analyze_[12] is looking
2252 at the outermost (toplevel) SET. */
2253 static bool can_start_lhs_rhs_p;
2255 /* Extend reg info for the deps context DEPS given that
2256 we have just generated a register numbered REGNO. */
2257 static void
2258 extend_deps_reg_info (struct deps_desc *deps, int regno)
2260 int max_regno = regno + 1;
2262 gcc_assert (!reload_completed);
2264 /* In a readonly context, it would not hurt to extend info,
2265 but it should not be needed. */
2266 if (reload_completed && deps->readonly)
2268 deps->max_reg = max_regno;
2269 return;
2272 if (max_regno > deps->max_reg)
2274 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2275 max_regno);
2276 memset (&deps->reg_last[deps->max_reg],
2277 0, (max_regno - deps->max_reg)
2278 * sizeof (struct deps_reg));
2279 deps->max_reg = max_regno;
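/* extend_deps_reg_info grows the reg_last array just enough to cover a freshly
   created pseudo and zero-fills only the new tail, so the existing
   per-register lists survive.  The same grow-and-clear pattern in standalone
   C; grow_reg_table and struct reg_entry are hypothetical stand-ins, and plain
   realloc is used where the real code uses XRESIZEVEC.  */
#if 0  /* Illustrative sketch only; not the scheduler's allocation helpers.  */
#include <stdlib.h>
#include <string.h>

struct reg_entry { void *uses, *sets; };

/* Grow *TABLE from *N_P entries to NEEDED entries, zeroing only the newly
   added tail.  Returns 0 on allocation failure.  */
static int
grow_reg_table (struct reg_entry **table, int *n_p, int needed)
{
  struct reg_entry *t;

  if (needed <= *n_p)
    return 1;
  t = (struct reg_entry *) realloc (*table, needed * sizeof (*t));
  if (!t)
    return 0;
  memset (t + *n_p, 0, (needed - *n_p) * sizeof (*t));
  *table = t;
  *n_p = needed;
  return 1;
}
#endif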
2283 /* Extends REG_INFO_P if needed. */
2284 void
2285 maybe_extend_reg_info_p (void)
2287 /* Extend REG_INFO_P, if needed. */
2288 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2290 size_t new_reg_info_p_size = max_regno + 128;
2292 gcc_assert (!reload_completed && sel_sched_p ());
2294 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2295 new_reg_info_p_size,
2296 reg_info_p_size,
2297 sizeof (*reg_info_p));
2298 reg_info_p_size = new_reg_info_p_size;
2302 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2303 The type of the reference is specified by REF and can be SET,
2304 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2306 static void
2307 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2308 enum rtx_code ref, rtx insn)
2310 /* We could emit new pseudos in renaming. Extend the reg structures. */
2311 if (!reload_completed && sel_sched_p ()
2312 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2313 extend_deps_reg_info (deps, regno);
2315 maybe_extend_reg_info_p ();
2317 /* A hard reg in a wide mode may really be multiple registers.
2318 If so, mark all of them just like the first. */
2319 if (regno < FIRST_PSEUDO_REGISTER)
2321 int i = hard_regno_nregs[regno][mode];
2322 if (ref == SET)
2324 while (--i >= 0)
2325 note_reg_set (regno + i);
2327 else if (ref == USE)
2329 while (--i >= 0)
2330 note_reg_use (regno + i);
2332 else
2334 while (--i >= 0)
2335 note_reg_clobber (regno + i);
2339 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2340 it does not reload. Ignore these as they have served their
2341 purpose already. */
2342 else if (regno >= deps->max_reg)
2344 enum rtx_code code = GET_CODE (PATTERN (insn));
2345 gcc_assert (code == USE || code == CLOBBER);
2348 else
2350 if (ref == SET)
2351 note_reg_set (regno);
2352 else if (ref == USE)
2353 note_reg_use (regno);
2354 else
2355 note_reg_clobber (regno);
2357 /* Pseudos that are REG_EQUIV to something may be replaced
2358 by that during reloading. We need only add dependencies for
2359 the address in the REG_EQUIV note. */
2360 if (!reload_completed && get_reg_known_equiv_p (regno))
2362 rtx t = get_reg_known_value (regno);
2363 if (MEM_P (t))
2364 sched_analyze_2 (deps, XEXP (t, 0), insn);
2367 /* Don't let it cross a call after scheduling if it doesn't
2368 already cross one. */
2369 if (REG_N_CALLS_CROSSED (regno) == 0)
2371 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2372 deps->sched_before_next_call
2373 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2374 else
2375 add_dependence_list (insn, deps->last_function_call, 1,
2376 REG_DEP_ANTI, false);
2381 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2382 rtx, X, creating all dependencies generated by the write to the
2383 destination of X, and reads of everything mentioned. */
2385 static void
2386 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2388 rtx dest = XEXP (x, 0);
2389 enum rtx_code code = GET_CODE (x);
2390 bool cslr_p = can_start_lhs_rhs_p;
2392 can_start_lhs_rhs_p = false;
2394 gcc_assert (dest);
2395 if (dest == 0)
2396 return;
2398 if (cslr_p && sched_deps_info->start_lhs)
2399 sched_deps_info->start_lhs (dest);
2401 if (GET_CODE (dest) == PARALLEL)
2403 int i;
2405 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2406 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2407 sched_analyze_1 (deps,
2408 gen_rtx_CLOBBER (VOIDmode,
2409 XEXP (XVECEXP (dest, 0, i), 0)),
2410 insn);
2412 if (cslr_p && sched_deps_info->finish_lhs)
2413 sched_deps_info->finish_lhs ();
2415 if (code == SET)
2417 can_start_lhs_rhs_p = cslr_p;
2419 sched_analyze_2 (deps, SET_SRC (x), insn);
2421 can_start_lhs_rhs_p = false;
2424 return;
2427 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2428 || GET_CODE (dest) == ZERO_EXTRACT)
2430 if (GET_CODE (dest) == STRICT_LOW_PART
2431 || GET_CODE (dest) == ZERO_EXTRACT
2432 || df_read_modify_subreg_p (dest))
2434 /* These both read and modify the result. We must handle
2435 them as writes to get proper dependencies for following
2436 instructions. We must handle them as reads to get proper
2437 dependencies from this to previous instructions.
2438 Thus we need to call sched_analyze_2. */
2440 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2442 if (GET_CODE (dest) == ZERO_EXTRACT)
2444 /* The second and third arguments are values read by this insn. */
2445 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2446 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2448 dest = XEXP (dest, 0);
2451 if (REG_P (dest))
2453 int regno = REGNO (dest);
2454 enum machine_mode mode = GET_MODE (dest);
2456 sched_analyze_reg (deps, regno, mode, code, insn);
2458 #ifdef STACK_REGS
2459 /* Treat all writes to a stack register as modifying the TOS. */
2460 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2462 /* Avoid analyzing the same register twice. */
2463 if (regno != FIRST_STACK_REG)
2464 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2466 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2467 FIRST_STACK_REG);
2469 #endif
2471 else if (MEM_P (dest))
2473 /* Writing memory. */
2474 rtx t = dest;
2476 if (sched_deps_info->use_cselib)
2478 enum machine_mode address_mode = get_address_mode (dest);
2480 t = shallow_copy_rtx (dest);
2481 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2482 GET_MODE (t), insn);
2483 XEXP (t, 0)
2484 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2485 insn);
2487 t = canon_rtx (t);
2489 /* Pending lists can't get larger with a readonly context. */
2490 if (!deps->readonly
2491 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2492 >= MAX_PENDING_LIST_LENGTH))
2494 /* Flush all pending reads and writes to prevent the pending lists
2495 from getting any larger. Insn scheduling runs too slowly when
2496 these lists get long. When compiling GCC with itself,
2497 this flush occurs 8 times for sparc, and 10 times for m88k using
2498 the default value of 32. */
2499 flush_pending_lists (deps, insn, false, true);
2501 else
2503 rtx pending, pending_mem;
2505 pending = deps->pending_read_insns;
2506 pending_mem = deps->pending_read_mems;
2507 while (pending)
2509 if (anti_dependence (XEXP (pending_mem, 0), t)
2510 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2511 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2512 DEP_ANTI);
2514 pending = XEXP (pending, 1);
2515 pending_mem = XEXP (pending_mem, 1);
2518 pending = deps->pending_write_insns;
2519 pending_mem = deps->pending_write_mems;
2520 while (pending)
2522 if (output_dependence (XEXP (pending_mem, 0), t)
2523 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2524 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2525 DEP_OUTPUT);
2527 pending = XEXP (pending, 1);
2528 pending_mem = XEXP (pending_mem, 1);
2531 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2532 REG_DEP_ANTI, true);
2533 add_dependence_list (insn, deps->pending_jump_insns, 1,
2534 REG_DEP_CONTROL, true);
2536 if (!deps->readonly)
2537 add_insn_mem_dependence (deps, false, insn, dest);
2539 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2542 if (cslr_p && sched_deps_info->finish_lhs)
2543 sched_deps_info->finish_lhs ();
2545 /* Analyze reads. */
2546 if (GET_CODE (x) == SET)
2548 can_start_lhs_rhs_p = cslr_p;
2550 sched_analyze_2 (deps, SET_SRC (x), insn);
2552 can_start_lhs_rhs_p = false;
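/* The store handling above checks a write against the pending reads (anti
   dependence) and pending writes (output dependence); the read handling in
   sched_analyze_2 below checks pending writes for true dependences.  A
   standalone sketch of that classification for two accesses assumed to
   possibly alias; classify_mem_dep is a hypothetical helper, not GCC's
   anti_dependence/output_dependence/true_dependence, which operate on rtx
   MEMs through the alias oracle.  */
#if 0  /* Illustrative sketch only.  */
enum mem_dep_kind { MEM_DEP_NONE, MEM_DEP_TRUE, MEM_DEP_ANTI, MEM_DEP_OUTPUT };

/* Classify the dependence of a later access on an earlier one, assuming the
   two accesses may reference the same memory.  */
static enum mem_dep_kind
classify_mem_dep (int earlier_is_write, int later_is_write)
{
  if (earlier_is_write && !later_is_write)
    return MEM_DEP_TRUE;	/* Read after write.   */
  if (!earlier_is_write && later_is_write)
    return MEM_DEP_ANTI;	/* Write after read.   */
  if (earlier_is_write && later_is_write)
    return MEM_DEP_OUTPUT;	/* Write after write.  */
  return MEM_DEP_NONE;		/* Read after read needs no ordering,
				   barring volatile accesses.  */
}
#endif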
2556 /* Analyze the uses of memory and registers in rtx X in INSN. */
2557 static void
2558 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2560 int i;
2561 int j;
2562 enum rtx_code code;
2563 const char *fmt;
2564 bool cslr_p = can_start_lhs_rhs_p;
2566 can_start_lhs_rhs_p = false;
2568 gcc_assert (x);
2569 if (x == 0)
2570 return;
2572 if (cslr_p && sched_deps_info->start_rhs)
2573 sched_deps_info->start_rhs (x);
2575 code = GET_CODE (x);
2577 switch (code)
2579 CASE_CONST_ANY:
2580 case SYMBOL_REF:
2581 case CONST:
2582 case LABEL_REF:
2583 /* Ignore constants. */
2584 if (cslr_p && sched_deps_info->finish_rhs)
2585 sched_deps_info->finish_rhs ();
2587 return;
2589 #ifdef HAVE_cc0
2590 case CC0:
2591 /* User of CC0 depends on immediately preceding insn. */
2592 SCHED_GROUP_P (insn) = 1;
2593 /* Don't move CC0 setter to another block (it can set up the
2594 same flag for previous CC0 users which is safe). */
2595 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2597 if (cslr_p && sched_deps_info->finish_rhs)
2598 sched_deps_info->finish_rhs ();
2600 return;
2601 #endif
2603 case REG:
2605 int regno = REGNO (x);
2606 enum machine_mode mode = GET_MODE (x);
2608 sched_analyze_reg (deps, regno, mode, USE, insn);
2610 #ifdef STACK_REGS
2611 /* Treat all reads of a stack register as modifying the TOS. */
2612 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2614 /* Avoid analyzing the same register twice. */
2615 if (regno != FIRST_STACK_REG)
2616 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2617 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2619 #endif
2621 if (cslr_p && sched_deps_info->finish_rhs)
2622 sched_deps_info->finish_rhs ();
2624 return;
2627 case MEM:
2629 /* Reading memory. */
2630 rtx u;
2631 rtx pending, pending_mem;
2632 rtx t = x;
2634 if (sched_deps_info->use_cselib)
2636 enum machine_mode address_mode = get_address_mode (t);
2638 t = shallow_copy_rtx (t);
2639 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2640 GET_MODE (t), insn);
2641 XEXP (t, 0)
2642 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2643 insn);
2646 if (!DEBUG_INSN_P (insn))
2648 t = canon_rtx (t);
2649 pending = deps->pending_read_insns;
2650 pending_mem = deps->pending_read_mems;
2651 while (pending)
2653 if (read_dependence (XEXP (pending_mem, 0), t)
2654 && ! sched_insns_conditions_mutex_p (insn,
2655 XEXP (pending, 0)))
2656 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2657 DEP_ANTI);
2659 pending = XEXP (pending, 1);
2660 pending_mem = XEXP (pending_mem, 1);
2663 pending = deps->pending_write_insns;
2664 pending_mem = deps->pending_write_mems;
2665 while (pending)
2667 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
2668 && ! sched_insns_conditions_mutex_p (insn,
2669 XEXP (pending, 0)))
2670 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2671 sched_deps_info->generate_spec_deps
2672 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2674 pending = XEXP (pending, 1);
2675 pending_mem = XEXP (pending_mem, 1);
2678 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2679 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2681 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2682 if (deps_may_trap_p (x))
2684 if ((sched_deps_info->generate_spec_deps)
2685 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2687 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2688 MAX_DEP_WEAK);
2690 note_dep (XEXP (u, 0), ds);
2692 else
2693 add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
2697 /* Always add these dependencies to pending_reads, since
2698 this insn may be followed by a write. */
2699 if (!deps->readonly)
2701 if ((deps->pending_read_list_length
2702 + deps->pending_write_list_length)
2703 >= MAX_PENDING_LIST_LENGTH
2704 && !DEBUG_INSN_P (insn))
2705 flush_pending_lists (deps, insn, true, true);
2706 add_insn_mem_dependence (deps, true, insn, x);
2709 sched_analyze_2 (deps, XEXP (x, 0), insn);
2711 if (cslr_p && sched_deps_info->finish_rhs)
2712 sched_deps_info->finish_rhs ();
2714 return;
2717 /* Force pending stores to memory in case a trap handler needs them. */
2718 case TRAP_IF:
2719 flush_pending_lists (deps, insn, true, false);
2720 break;
2722 case PREFETCH:
2723 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2724 reg_pending_barrier = TRUE_BARRIER;
2725 /* A prefetch insn contains addresses only. So if the prefetch
2726 address has no registers, there will be no dependencies on
2727 the prefetch insn. This is wrong from a code correctness
2728 point of view, because such a prefetch can be moved below
2729 a jump insn that usually generates a MOVE_BARRIER, which prevents
2730 insns containing registers or memories from moving through the
2731 barrier. It is also wrong from a generated code performance
2732 point of view, because a prefetch without dependencies has a
2733 tendency to be issued later instead of earlier. It is hard
2734 to generate accurate dependencies for prefetch insns, as a
2735 prefetch has only the start address, but it is better to have
2736 something than nothing. */
2737 if (!deps->readonly)
2739 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2740 if (sched_deps_info->use_cselib)
2741 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2742 add_insn_mem_dependence (deps, true, insn, x);
2744 break;
2746 case UNSPEC_VOLATILE:
2747 flush_pending_lists (deps, insn, true, true);
2748 /* FALLTHRU */
2750 case ASM_OPERANDS:
2751 case ASM_INPUT:
2753 /* Traditional and volatile asm instructions must be considered to use
2754 and clobber all hard registers, all pseudo-registers and all of
2755 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2757 Consider for instance a volatile asm that changes the fpu rounding
2758 mode. An insn should not be moved across this even if it only uses
2759 pseudo-regs because it might give an incorrectly rounded result. */
2760 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2761 && !DEBUG_INSN_P (insn))
2762 reg_pending_barrier = TRUE_BARRIER;
2764 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2765 We cannot just fall through here, since then we would be confused
2766 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2767 a traditional asm unlike its normal usage. */
2769 if (code == ASM_OPERANDS)
2771 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2772 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2774 if (cslr_p && sched_deps_info->finish_rhs)
2775 sched_deps_info->finish_rhs ();
2777 return;
2779 break;
2782 case PRE_DEC:
2783 case POST_DEC:
2784 case PRE_INC:
2785 case POST_INC:
2786 /* These both read and modify the result. We must handle them as writes
2787 to get proper dependencies for following instructions. We must handle
2788 them as reads to get proper dependencies from this to previous
2789 instructions. Thus we need to pass them to both sched_analyze_1
2790 and sched_analyze_2. We must call sched_analyze_2 first in order
2791 to get the proper antecedent for the read. */
2792 sched_analyze_2 (deps, XEXP (x, 0), insn);
2793 sched_analyze_1 (deps, x, insn);
2795 if (cslr_p && sched_deps_info->finish_rhs)
2796 sched_deps_info->finish_rhs ();
2798 return;
2800 case POST_MODIFY:
2801 case PRE_MODIFY:
2802 /* op0 = op0 + op1 */
2803 sched_analyze_2 (deps, XEXP (x, 0), insn);
2804 sched_analyze_2 (deps, XEXP (x, 1), insn);
2805 sched_analyze_1 (deps, x, insn);
2807 if (cslr_p && sched_deps_info->finish_rhs)
2808 sched_deps_info->finish_rhs ();
2810 return;
2812 default:
2813 break;
2816 /* Other cases: walk the insn. */
2817 fmt = GET_RTX_FORMAT (code);
2818 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2820 if (fmt[i] == 'e')
2821 sched_analyze_2 (deps, XEXP (x, i), insn);
2822 else if (fmt[i] == 'E')
2823 for (j = 0; j < XVECLEN (x, i); j++)
2824 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2827 if (cslr_p && sched_deps_info->finish_rhs)
2828 sched_deps_info->finish_rhs ();
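/* The default case above walks an arbitrary rtx by consulting GET_RTX_FORMAT:
   an 'e' slot holds one subexpression and an 'E' slot holds a vector, and each
   is recursed into.  The same format-driven dispatch on a hypothetical toy
   expression type; struct expr and walk are illustrative only and do not model
   the other rtx format codes.  */
#if 0  /* Illustrative sketch only; a toy stand-in for rtx.  */
struct expr
{
  const char *fmt;	/* e.g. "ee" for a binary operator, "E" for a vector.  */
  void **ops;		/* An 'e' slot holds a struct expr *; an 'E' slot holds
			   a struct expr ** of veclen[i] elements.  */
  int *veclen;
};

static void
walk (struct expr *x, void (*visit) (struct expr *))
{
  int i, j;

  visit (x);
  for (i = 0; x->fmt[i]; i++)
    if (x->fmt[i] == 'e')
      walk ((struct expr *) x->ops[i], visit);
    else if (x->fmt[i] == 'E')
      {
	struct expr **vec = (struct expr **) x->ops[i];
	for (j = 0; j < x->veclen[i]; j++)
	  walk (vec[j], visit);
      }
}
#endif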
2831 /* Try to group two fusible insns together to prevent the scheduler
2832 from scheduling them apart. */
2834 static void
2835 sched_macro_fuse_insns (rtx insn)
2837 rtx prev;
2839 if (any_condjump_p (insn))
2841 unsigned int condreg1, condreg2;
2842 rtx cc_reg_1;
2843 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2844 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2845 prev = prev_nonnote_nondebug_insn (insn);
2846 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2847 || !prev
2848 || !modified_in_p (cc_reg_1, prev))
2849 return;
2851 else
2853 rtx insn_set = single_set (insn);
2855 prev = prev_nonnote_nondebug_insn (insn);
2856 if (!prev
2857 || !insn_set
2858 || !single_set (prev))
2859 return;
2863 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2864 SCHED_GROUP_P (insn) = 1;
2868 /* Analyze an INSN with pattern X to find all dependencies. */
2869 static void
2870 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2872 RTX_CODE code = GET_CODE (x);
2873 rtx link;
2874 unsigned i;
2875 reg_set_iterator rsi;
2877 if (! reload_completed)
2879 HARD_REG_SET temp;
2881 extract_insn (insn);
2882 preprocess_constraints ();
2883 ira_implicitly_set_insn_hard_regs (&temp);
2884 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2885 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2888 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2889 && code == SET);
2891 /* Group compare and branch insns for macro-fusion. */
2892 if (targetm.sched.macro_fusion_p
2893 && targetm.sched.macro_fusion_p ())
2894 sched_macro_fuse_insns (insn);
2896 if (may_trap_p (x))
2897 /* Avoid moving trapping instructions across function calls that might
2898 not always return. */
2899 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2900 1, REG_DEP_ANTI, true);
2902 /* We must avoid creating a situation in which two successors of the
2903 current block have different unwind info after scheduling. If at any
2904 point the two paths re-join this leads to incorrect unwind info. */
2905 /* ??? There are certain situations involving a forced frame pointer in
2906 which, with extra effort, we could fix up the unwind info at a later
2907 CFG join. However, it seems better to notice these cases earlier
2908 during prologue generation and avoid marking the frame pointer setup
2909 as frame-related at all. */
2910 if (RTX_FRAME_RELATED_P (insn))
2912 /* Make sure prologue insn is scheduled before next jump. */
2913 deps->sched_before_next_jump
2914 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2916 /* Make sure epilogue insn is scheduled after preceding jumps. */
2917 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2918 true);
2921 if (code == COND_EXEC)
2923 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2925 /* ??? Should be recording conditions so we reduce the number of
2926 false dependencies. */
2927 x = COND_EXEC_CODE (x);
2928 code = GET_CODE (x);
2930 if (code == SET || code == CLOBBER)
2932 sched_analyze_1 (deps, x, insn);
2934 /* Bare clobber insns are used for letting life analysis, reg-stack
2935 and others know that a value is dead. Depend on the last call
2936 instruction so that reg-stack won't get confused. */
2937 if (code == CLOBBER)
2938 add_dependence_list (insn, deps->last_function_call, 1,
2939 REG_DEP_OUTPUT, true);
2941 else if (code == PARALLEL)
2943 for (i = XVECLEN (x, 0); i--;)
2945 rtx sub = XVECEXP (x, 0, i);
2946 code = GET_CODE (sub);
2948 if (code == COND_EXEC)
2950 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2951 sub = COND_EXEC_CODE (sub);
2952 code = GET_CODE (sub);
2954 if (code == SET || code == CLOBBER)
2955 sched_analyze_1 (deps, sub, insn);
2956 else
2957 sched_analyze_2 (deps, sub, insn);
2960 else
2961 sched_analyze_2 (deps, x, insn);
2963 /* Mark registers CLOBBERED or used by the called function. */
2964 if (CALL_P (insn))
2966 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2968 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2969 sched_analyze_1 (deps, XEXP (link, 0), insn);
2970 else if (GET_CODE (XEXP (link, 0)) != SET)
2971 sched_analyze_2 (deps, XEXP (link, 0), insn);
2973 /* Don't schedule anything after a tail call; a tail call needs
2974 to use at least all call-saved registers. */
2975 if (SIBLING_CALL_P (insn))
2976 reg_pending_barrier = TRUE_BARRIER;
2977 else if (find_reg_note (insn, REG_SETJMP, NULL))
2978 reg_pending_barrier = MOVE_BARRIER;
2981 if (JUMP_P (insn))
2983 rtx next;
2984 next = next_nonnote_nondebug_insn (insn);
2985 if (next && BARRIER_P (next))
2986 reg_pending_barrier = MOVE_BARRIER;
2987 else
2989 rtx pending, pending_mem;
2991 if (sched_deps_info->compute_jump_reg_dependencies)
2993 (*sched_deps_info->compute_jump_reg_dependencies)
2994 (insn, reg_pending_control_uses);
2996 /* Make latency of jump equal to 0 by using anti-dependence. */
2997 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
2999 struct deps_reg *reg_last = &deps->reg_last[i];
3000 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3001 false);
3002 add_dependence_list (insn, reg_last->implicit_sets,
3003 0, REG_DEP_ANTI, false);
3004 add_dependence_list (insn, reg_last->clobbers, 0,
3005 REG_DEP_ANTI, false);
3009 /* All memory writes and volatile reads must happen before the
3010 jump. Non-volatile reads must happen before the jump iff
3011 the result is needed by the register-use mask computed above. */
3013 pending = deps->pending_write_insns;
3014 pending_mem = deps->pending_write_mems;
3015 while (pending)
3017 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
3018 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3019 pending = XEXP (pending, 1);
3020 pending_mem = XEXP (pending_mem, 1);
3023 pending = deps->pending_read_insns;
3024 pending_mem = deps->pending_read_mems;
3025 while (pending)
3027 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
3028 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
3029 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
3030 pending = XEXP (pending, 1);
3031 pending_mem = XEXP (pending_mem, 1);
3034 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3035 REG_DEP_ANTI, true);
3036 add_dependence_list (insn, deps->pending_jump_insns, 1,
3037 REG_DEP_ANTI, true);
3041 /* If this instruction can throw an exception, then moving it changes
3042 where block boundaries fall. This is mighty confusing elsewhere.
3043 Therefore, prevent such an instruction from being moved. Same for
3044 non-jump instructions that define block boundaries.
3045 ??? Unclear whether this is still necessary in EBB mode. If not,
3046 add_branch_dependences should be adjusted for RGN mode instead. */
3047 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3048 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3049 reg_pending_barrier = MOVE_BARRIER;
3051 if (sched_pressure != SCHED_PRESSURE_NONE)
3053 setup_insn_reg_uses (deps, insn);
3054 init_insn_reg_pressure_info (insn);
3057 /* Add register dependencies for insn. */
3058 if (DEBUG_INSN_P (insn))
3060 rtx prev = deps->last_debug_insn;
3061 rtx u;
3063 if (!deps->readonly)
3064 deps->last_debug_insn = insn;
3066 if (prev)
3067 add_dependence (insn, prev, REG_DEP_ANTI);
3069 add_dependence_list (insn, deps->last_function_call, 1,
3070 REG_DEP_ANTI, false);
3072 if (!sel_sched_p ())
3073 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3074 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
3076 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3078 struct deps_reg *reg_last = &deps->reg_last[i];
3079 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3080 /* There's no point in making REG_DEP_CONTROL dependencies for
3081 debug insns. */
3082 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3083 false);
3085 if (!deps->readonly)
3086 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3088 CLEAR_REG_SET (reg_pending_uses);
3090 /* Quite often, a debug insn will refer to stuff in the
3091 previous instruction, but the reason we want this
3092 dependency here is to make sure the scheduler doesn't
3093 gratuitously move a debug insn ahead. This could dirty
3094 DF flags and cause additional analysis that wouldn't have
3095 occurred in compilation without debug insns, and such
3096 additional analysis can modify the generated code. */
3097 prev = PREV_INSN (insn);
3099 if (prev && NONDEBUG_INSN_P (prev))
3100 add_dependence (insn, prev, REG_DEP_ANTI);
3102 else
3104 regset_head set_or_clobbered;
3106 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3108 struct deps_reg *reg_last = &deps->reg_last[i];
3109 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3110 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3111 false);
3112 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3113 false);
3115 if (!deps->readonly)
3117 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3118 reg_last->uses_length++;
3122 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3123 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3125 struct deps_reg *reg_last = &deps->reg_last[i];
3126 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3127 add_dependence_list (insn, reg_last->implicit_sets, 0,
3128 REG_DEP_ANTI, false);
3129 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3130 false);
3132 if (!deps->readonly)
3134 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3135 reg_last->uses_length++;
3139 if (targetm.sched.exposed_pipeline)
3141 INIT_REG_SET (&set_or_clobbered);
3142 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3143 reg_pending_sets);
3144 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3146 struct deps_reg *reg_last = &deps->reg_last[i];
3147 rtx list;
3148 for (list = reg_last->uses; list; list = XEXP (list, 1))
3150 rtx other = XEXP (list, 0);
3151 if (INSN_CACHED_COND (other) != const_true_rtx
3152 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3153 INSN_CACHED_COND (other) = const_true_rtx;
3158 /* If the current insn is conditional, we can't free any
3159 of the lists. */
3160 if (sched_has_condition_p (insn))
3162 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3164 struct deps_reg *reg_last = &deps->reg_last[i];
3165 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3166 false);
3167 add_dependence_list (insn, reg_last->implicit_sets, 0,
3168 REG_DEP_ANTI, false);
3169 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3170 false);
3171 add_dependence_list (insn, reg_last->control_uses, 0,
3172 REG_DEP_CONTROL, false);
3174 if (!deps->readonly)
3176 reg_last->clobbers
3177 = alloc_INSN_LIST (insn, reg_last->clobbers);
3178 reg_last->clobbers_length++;
3181 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3183 struct deps_reg *reg_last = &deps->reg_last[i];
3184 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3185 false);
3186 add_dependence_list (insn, reg_last->implicit_sets, 0,
3187 REG_DEP_ANTI, false);
3188 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3189 false);
3190 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3191 false);
3192 add_dependence_list (insn, reg_last->control_uses, 0,
3193 REG_DEP_CONTROL, false);
3195 if (!deps->readonly)
3196 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3199 else
3201 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3203 struct deps_reg *reg_last = &deps->reg_last[i];
3204 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3205 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3207 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3208 REG_DEP_OUTPUT, false);
3209 add_dependence_list_and_free (deps, insn,
3210 &reg_last->implicit_sets, 0,
3211 REG_DEP_ANTI, false);
3212 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3213 REG_DEP_ANTI, false);
3214 add_dependence_list_and_free (deps, insn,
3215 &reg_last->control_uses, 0,
3216 REG_DEP_ANTI, false);
3217 add_dependence_list_and_free (deps, insn,
3218 &reg_last->clobbers, 0,
3219 REG_DEP_OUTPUT, false);
3221 if (!deps->readonly)
3223 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3224 reg_last->clobbers_length = 0;
3225 reg_last->uses_length = 0;
3228 else
3230 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3231 false);
3232 add_dependence_list (insn, reg_last->implicit_sets, 0,
3233 REG_DEP_ANTI, false);
3234 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3235 false);
3236 add_dependence_list (insn, reg_last->control_uses, 0,
3237 REG_DEP_CONTROL, false);
3240 if (!deps->readonly)
3242 reg_last->clobbers_length++;
3243 reg_last->clobbers
3244 = alloc_INSN_LIST (insn, reg_last->clobbers);
3247 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3249 struct deps_reg *reg_last = &deps->reg_last[i];
3251 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3252 REG_DEP_OUTPUT, false);
3253 add_dependence_list_and_free (deps, insn,
3254 &reg_last->implicit_sets,
3255 0, REG_DEP_ANTI, false);
3256 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3257 REG_DEP_OUTPUT, false);
3258 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3259 REG_DEP_ANTI, false);
3260 add_dependence_list (insn, reg_last->control_uses, 0,
3261 REG_DEP_CONTROL, false);
3263 if (!deps->readonly)
3265 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3266 reg_last->uses_length = 0;
3267 reg_last->clobbers_length = 0;
3271 if (!deps->readonly)
3273 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3275 struct deps_reg *reg_last = &deps->reg_last[i];
3276 reg_last->control_uses
3277 = alloc_INSN_LIST (insn, reg_last->control_uses);
3282 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3283 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3285 struct deps_reg *reg_last = &deps->reg_last[i];
3286 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3287 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3288 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3289 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3290 false);
3292 if (!deps->readonly)
3293 reg_last->implicit_sets
3294 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3297 if (!deps->readonly)
3299 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3300 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3301 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3302 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3303 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3304 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3305 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3307 /* Set up the pending barrier found. */
3308 deps->last_reg_pending_barrier = reg_pending_barrier;
3311 CLEAR_REG_SET (reg_pending_uses);
3312 CLEAR_REG_SET (reg_pending_clobbers);
3313 CLEAR_REG_SET (reg_pending_sets);
3314 CLEAR_REG_SET (reg_pending_control_uses);
3315 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3316 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3318 /* Add dependencies if a scheduling barrier was found. */
3319 if (reg_pending_barrier)
3321 /* In the case of a barrier, most of the added dependencies are not
3322 real, so we use anti-dependences here. */
3323 if (sched_has_condition_p (insn))
3325 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3327 struct deps_reg *reg_last = &deps->reg_last[i];
3328 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3329 true);
3330 add_dependence_list (insn, reg_last->sets, 0,
3331 reg_pending_barrier == TRUE_BARRIER
3332 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3333 add_dependence_list (insn, reg_last->implicit_sets, 0,
3334 REG_DEP_ANTI, true);
3335 add_dependence_list (insn, reg_last->clobbers, 0,
3336 reg_pending_barrier == TRUE_BARRIER
3337 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3340 else
3342 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3344 struct deps_reg *reg_last = &deps->reg_last[i];
3345 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3346 REG_DEP_ANTI, true);
3347 add_dependence_list_and_free (deps, insn,
3348 &reg_last->control_uses, 0,
3349 REG_DEP_CONTROL, true);
3350 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3351 reg_pending_barrier == TRUE_BARRIER
3352 ? REG_DEP_TRUE : REG_DEP_ANTI,
3353 true);
3354 add_dependence_list_and_free (deps, insn,
3355 &reg_last->implicit_sets, 0,
3356 REG_DEP_ANTI, true);
3357 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3358 reg_pending_barrier == TRUE_BARRIER
3359 ? REG_DEP_TRUE : REG_DEP_ANTI,
3360 true);
3362 if (!deps->readonly)
3364 reg_last->uses_length = 0;
3365 reg_last->clobbers_length = 0;
3370 if (!deps->readonly)
3371 for (i = 0; i < (unsigned)deps->max_reg; i++)
3373 struct deps_reg *reg_last = &deps->reg_last[i];
3374 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3375 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3378 /* Don't flush pending lists on speculative checks for
3379 selective scheduling. */
3380 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3381 flush_pending_lists (deps, insn, true, true);
3383 reg_pending_barrier = NOT_A_BARRIER;
3386 /* If a post-call group is still open, see if it should remain so.
3387 This insn must be a simple move of a hard reg to a pseudo or
3388 vice-versa.
3390 We must avoid moving these insns for correctness on targets
3391 with small register classes, and for special registers like
3392 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3393 hard regs for all targets. */
3395 if (deps->in_post_call_group_p)
3397 rtx tmp, set = single_set (insn);
3398 int src_regno, dest_regno;
3400 if (set == NULL)
3402 if (DEBUG_INSN_P (insn))
3403 /* We don't want to mark debug insns as part of the same
3404 sched group. We know they really aren't, but if we use
3405 debug insns to tell that a call group is over, we'll
3406 get different code if debug insns are not there and
3407 instructions that follow seem like they should be part
3408 of the call group.
3410 Also, if we did, chain_to_prev_insn would move the
3411 deps of the debug insn to the call insn, modifying
3412 non-debug post-dependency counts of the debug insn
3413 dependencies and otherwise messing with the scheduling
3414 order.
3416 Instead, let such debug insns be scheduled freely, but
3417 keep the call group open in case there are insns that
3418 should be part of it afterwards. Since we grant debug
3419 insns higher priority than even sched group insns, it
3420 will all turn out all right. */
3421 goto debug_dont_end_call_group;
3422 else
3423 goto end_call_group;
3426 tmp = SET_DEST (set);
3427 if (GET_CODE (tmp) == SUBREG)
3428 tmp = SUBREG_REG (tmp);
3429 if (REG_P (tmp))
3430 dest_regno = REGNO (tmp);
3431 else
3432 goto end_call_group;
3434 tmp = SET_SRC (set);
3435 if (GET_CODE (tmp) == SUBREG)
3436 tmp = SUBREG_REG (tmp);
3437 if ((GET_CODE (tmp) == PLUS
3438 || GET_CODE (tmp) == MINUS)
3439 && REG_P (XEXP (tmp, 0))
3440 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3441 && dest_regno == STACK_POINTER_REGNUM)
3442 src_regno = STACK_POINTER_REGNUM;
3443 else if (REG_P (tmp))
3444 src_regno = REGNO (tmp);
3445 else
3446 goto end_call_group;
3448 if (src_regno < FIRST_PSEUDO_REGISTER
3449 || dest_regno < FIRST_PSEUDO_REGISTER)
3451 if (!deps->readonly
3452 && deps->in_post_call_group_p == post_call_initial)
3453 deps->in_post_call_group_p = post_call;
3455 if (!sel_sched_p () || sched_emulate_haifa_p)
3457 SCHED_GROUP_P (insn) = 1;
3458 CANT_MOVE (insn) = 1;
3461 else
3463 end_call_group:
3464 if (!deps->readonly)
3465 deps->in_post_call_group_p = not_post_call;
3469 debug_dont_end_call_group:
3470 if ((current_sched_info->flags & DO_SPECULATION)
3471 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3472 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3473 be speculated. */
3475 if (sel_sched_p ())
3476 sel_mark_hard_insn (insn);
3477 else
3479 sd_iterator_def sd_it;
3480 dep_t dep;
3482 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3483 sd_iterator_cond (&sd_it, &dep);)
3484 change_spec_dep_to_hard (sd_it);
3488 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3489 honor the original ordering of such insns. */
3490 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3492 if (deps->last_args_size)
3493 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3494 deps->last_args_size = insn;
3498 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3499 longjmp, loop forever, ...). */
3500 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3501 test for ECF_NORETURN? */
3502 static bool
3503 call_may_noreturn_p (rtx insn)
3505 rtx call;
3507 /* const or pure calls that aren't looping will always return. */
3508 if (RTL_CONST_OR_PURE_CALL_P (insn)
3509 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3510 return false;
3512 call = get_call_rtx_from (insn);
3513 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3515 rtx symbol = XEXP (XEXP (call, 0), 0);
3516 if (SYMBOL_REF_DECL (symbol)
3517 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3519 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3520 == BUILT_IN_NORMAL)
3521 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3523 case BUILT_IN_BCMP:
3524 case BUILT_IN_BCOPY:
3525 case BUILT_IN_BZERO:
3526 case BUILT_IN_INDEX:
3527 case BUILT_IN_MEMCHR:
3528 case BUILT_IN_MEMCMP:
3529 case BUILT_IN_MEMCPY:
3530 case BUILT_IN_MEMMOVE:
3531 case BUILT_IN_MEMPCPY:
3532 case BUILT_IN_MEMSET:
3533 case BUILT_IN_RINDEX:
3534 case BUILT_IN_STPCPY:
3535 case BUILT_IN_STPNCPY:
3536 case BUILT_IN_STRCAT:
3537 case BUILT_IN_STRCHR:
3538 case BUILT_IN_STRCMP:
3539 case BUILT_IN_STRCPY:
3540 case BUILT_IN_STRCSPN:
3541 case BUILT_IN_STRLEN:
3542 case BUILT_IN_STRNCAT:
3543 case BUILT_IN_STRNCMP:
3544 case BUILT_IN_STRNCPY:
3545 case BUILT_IN_STRPBRK:
3546 case BUILT_IN_STRRCHR:
3547 case BUILT_IN_STRSPN:
3548 case BUILT_IN_STRSTR:
3549 /* Assume certain string/memory builtins always return. */
3550 return false;
3551 default:
3552 break;
3557 /* For all other calls assume that they might not always return. */
3558 return true;
3561 /* Return true if INSN should be made dependent on the previous instruction
3562 group, and if all INSN's dependencies should be moved to the first
3563 instruction of that group. */
3565 static bool
3566 chain_to_prev_insn_p (rtx insn)
3568 rtx prev, x;
3570 /* INSN forms a group with the previous instruction. */
3571 if (SCHED_GROUP_P (insn))
3572 return true;
3574 /* If the previous instruction clobbers a register R and this one sets
3575 part of R, the clobber was added specifically to help us track the
3576 liveness of R. There's no point scheduling the clobber and leaving
3577 INSN behind, especially if we move the clobber to another block. */
3578 prev = prev_nonnote_nondebug_insn (insn);
3579 if (prev
3580 && INSN_P (prev)
3581 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3582 && GET_CODE (PATTERN (prev)) == CLOBBER)
3584 x = XEXP (PATTERN (prev), 0);
3585 if (set_of (x, insn))
3586 return true;
3589 return false;
3592 /* Analyze INSN with DEPS as a context. */
3593 void
3594 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3596 if (sched_deps_info->start_insn)
3597 sched_deps_info->start_insn (insn);
3599 /* Record the condition for this insn. */
3600 if (NONDEBUG_INSN_P (insn))
3602 rtx t;
3603 sched_get_condition_with_rev (insn, NULL);
3604 t = INSN_CACHED_COND (insn);
3605 INSN_COND_DEPS (insn) = NULL_RTX;
3606 if (reload_completed
3607 && (current_sched_info->flags & DO_PREDICATION)
3608 && COMPARISON_P (t)
3609 && REG_P (XEXP (t, 0))
3610 && CONSTANT_P (XEXP (t, 1)))
3612 unsigned int regno;
3613 int nregs;
3614 t = XEXP (t, 0);
3615 regno = REGNO (t);
3616 nregs = hard_regno_nregs[regno][GET_MODE (t)];
3617 t = NULL_RTX;
3618 while (nregs-- > 0)
3620 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3621 t = concat_INSN_LIST (reg_last->sets, t);
3622 t = concat_INSN_LIST (reg_last->clobbers, t);
3623 t = concat_INSN_LIST (reg_last->implicit_sets, t);
3625 INSN_COND_DEPS (insn) = t;
3629 if (JUMP_P (insn))
3631 /* Make each JUMP_INSN (but not a speculative check)
3632 a scheduling barrier for memory references. */
3633 if (!deps->readonly
3634 && !(sel_sched_p ()
3635 && sel_insn_is_speculation_check (insn)))
3637 /* Keep the list a reasonable size. */
3638 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3639 flush_pending_lists (deps, insn, true, true);
3640 else
3641 deps->pending_jump_insns
3642 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3645 /* For each insn which shouldn't cross a jump, add a dependence. */
3646 add_dependence_list_and_free (deps, insn,
3647 &deps->sched_before_next_jump, 1,
3648 REG_DEP_ANTI, true);
3650 sched_analyze_insn (deps, PATTERN (insn), insn);
3652 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3654 sched_analyze_insn (deps, PATTERN (insn), insn);
3656 else if (CALL_P (insn))
3658 int i;
3660 CANT_MOVE (insn) = 1;
3662 if (find_reg_note (insn, REG_SETJMP, NULL))
3664 /* This is setjmp. Assume that all registers, not just
3665 hard registers, may be clobbered by this call. */
3666 reg_pending_barrier = MOVE_BARRIER;
3668 else
3670 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3671 /* A call may read and modify global register variables. */
3672 if (global_regs[i])
3674 SET_REGNO_REG_SET (reg_pending_sets, i);
3675 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3677 /* Other call-clobbered hard regs may be clobbered.
3678 Since we only have a choice between 'might be clobbered'
3679 and 'definitely not clobbered', we must include all
3680 partly call-clobbered registers here. */
3681 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3682 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3683 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3684 /* We don't know what set of fixed registers might be used
3685 by the function, but it is certain that the stack pointer
3686 is among them; be conservative about the rest. */
3687 else if (fixed_regs[i])
3688 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3689 /* The frame pointer is normally not used by the function
3690 itself, but by the debugger. */
3691 /* ??? MIPS o32 is an exception. It uses the frame pointer
3692 in the macro expansion of jal but does not represent this
3693 fact in the call_insn rtl. */
3694 else if (i == FRAME_POINTER_REGNUM
3695 || (i == HARD_FRAME_POINTER_REGNUM
3696 && (! reload_completed || frame_pointer_needed)))
3697 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3700 /* For each insn which shouldn't cross a call, add a dependence
3701 between that insn and this call insn. */
3702 add_dependence_list_and_free (deps, insn,
3703 &deps->sched_before_next_call, 1,
3704 REG_DEP_ANTI, true);
3706 sched_analyze_insn (deps, PATTERN (insn), insn);
3708 /* If CALL would be in a sched group, then this would violate the
3709 convention that sched group insns have dependencies only on the
3710 previous instruction.
3712 Of course one can say: "Hey! What about head of the sched group?"
3713 And I will answer: "Basic principles (one dep per insn) are always
3714 the same." */
3715 gcc_assert (!SCHED_GROUP_P (insn));
3717 /* In the absence of interprocedural alias analysis, we must flush
3718 all pending reads and writes, and start new dependencies from
3719 here. But only flush writes for constant calls (which may
3720 be passed a pointer to something we haven't written yet). */
3721 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3723 if (!deps->readonly)
3725 /* Remember the last function call for limiting lifetimes. */
3726 free_INSN_LIST_list (&deps->last_function_call);
3727 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3729 if (call_may_noreturn_p (insn))
3731 /* Remember the last function call that might not always return
3732 normally for limiting moves of trapping insns. */
3733 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3734 deps->last_function_call_may_noreturn
3735 = alloc_INSN_LIST (insn, NULL_RTX);
3738 /* Before reload, begin a post-call group, so as to keep the
3739 lifetimes of hard registers correct. */
3740 if (! reload_completed)
3741 deps->in_post_call_group_p = post_call;
3745 if (sched_deps_info->use_cselib)
3746 cselib_process_insn (insn);
3748 if (sched_deps_info->finish_insn)
3749 sched_deps_info->finish_insn ();
3751 /* Fixup the dependencies in the sched group. */
3752 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3753 && chain_to_prev_insn_p (insn)
3754 && !sel_sched_p ())
3755 chain_to_prev_insn (insn);
3758 /* Initialize DEPS for the new block beginning with HEAD. */
3759 void
3760 deps_start_bb (struct deps_desc *deps, rtx head)
3762 gcc_assert (!deps->readonly);
3764 /* Before reload, if the previous block ended in a call, show that
3765 we are inside a post-call group, so as to keep the lifetimes of
3766 hard registers correct. */
3767 if (! reload_completed && !LABEL_P (head))
3769 rtx insn = prev_nonnote_nondebug_insn (head);
3771 if (insn && CALL_P (insn))
3772 deps->in_post_call_group_p = post_call_initial;
3776 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3777 dependencies for each insn. */
3778 void
3779 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3781 rtx insn;
3783 if (sched_deps_info->use_cselib)
3784 cselib_init (CSELIB_RECORD_MEMORY);
3786 deps_start_bb (deps, head);
3788 for (insn = head;; insn = NEXT_INSN (insn))
3791 if (INSN_P (insn))
3793 /* And initialize deps_lists. */
3794 sd_init_insn (insn);
3795 /* Clean up SCHED_GROUP_P which may be set by last
3796 scheduler pass. */
3797 if (SCHED_GROUP_P (insn))
3798 SCHED_GROUP_P (insn) = 0;
3801 deps_analyze_insn (deps, insn);
3803 if (insn == tail)
3805 if (sched_deps_info->use_cselib)
3806 cselib_finish ();
3807 return;
3810 gcc_unreachable ();
3813 /* Helper for sched_free_deps ().
3814 Delete INSN's (RESOLVED_P) backward dependencies. */
3815 static void
3816 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3818 sd_iterator_def sd_it;
3819 dep_t dep;
3820 sd_list_types_def types;
3822 if (resolved_p)
3823 types = SD_LIST_RES_BACK;
3824 else
3825 types = SD_LIST_BACK;
3827 for (sd_it = sd_iterator_start (insn, types);
3828 sd_iterator_cond (&sd_it, &dep);)
3830 dep_link_t link = *sd_it.linkp;
3831 dep_node_t node = DEP_LINK_NODE (link);
3832 deps_list_t back_list;
3833 deps_list_t forw_list;
3835 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3836 remove_from_deps_list (link, back_list);
3837 delete_dep_node (node);
3841 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3842 deps_lists. */
3843 void
3844 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3846 rtx insn;
3847 rtx next_tail = NEXT_INSN (tail);
3849 /* We make two passes since some insns may be scheduled before their
3850 dependencies are resolved. */
3851 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3852 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3854 /* Clear forward deps and leave the dep_nodes to the
3855 corresponding back_deps list. */
3856 if (resolved_p)
3857 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3858 else
3859 clear_deps_list (INSN_FORW_DEPS (insn));
3861 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3862 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3864 /* Clear resolved back deps together with their dep_nodes. */
3865 delete_dep_nodes_in_back_deps (insn, resolved_p);
3867 sd_finish_insn (insn);
3871 /* Initialize variables for region data dependence analysis.
3872 When LAZY_REG_LAST is true, do not allocate reg_last array
3873 of struct deps_desc immediately. */
3875 void
3876 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3878 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3880 deps->max_reg = max_reg;
3881 if (lazy_reg_last)
3882 deps->reg_last = NULL;
3883 else
3884 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3885 INIT_REG_SET (&deps->reg_last_in_use);
3887 deps->pending_read_insns = 0;
3888 deps->pending_read_mems = 0;
3889 deps->pending_write_insns = 0;
3890 deps->pending_write_mems = 0;
3891 deps->pending_jump_insns = 0;
3892 deps->pending_read_list_length = 0;
3893 deps->pending_write_list_length = 0;
3894 deps->pending_flush_length = 0;
3895 deps->last_pending_memory_flush = 0;
3896 deps->last_function_call = 0;
3897 deps->last_function_call_may_noreturn = 0;
3898 deps->sched_before_next_call = 0;
3899 deps->sched_before_next_jump = 0;
3900 deps->in_post_call_group_p = not_post_call;
3901 deps->last_debug_insn = 0;
3902 deps->last_args_size = 0;
3903 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3904 deps->readonly = 0;
3907 /* Init only the reg_last field of DEPS, which was not allocated before
3908 because we initialized DEPS lazily. */
3909 void
3910 init_deps_reg_last (struct deps_desc *deps)
3912 gcc_assert (deps && deps->max_reg > 0);
3913 gcc_assert (deps->reg_last == NULL);
3915 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3919 /* Free insn lists found in DEPS. */
3921 void
3922 free_deps (struct deps_desc *deps)
3924 unsigned i;
3925 reg_set_iterator rsi;
3927 /* We set max_reg to 0 when this context was already freed. */
3928 if (deps->max_reg == 0)
3930 gcc_assert (deps->reg_last == NULL);
3931 return;
3933 deps->max_reg = 0;
3935 free_INSN_LIST_list (&deps->pending_read_insns);
3936 free_EXPR_LIST_list (&deps->pending_read_mems);
3937 free_INSN_LIST_list (&deps->pending_write_insns);
3938 free_EXPR_LIST_list (&deps->pending_write_mems);
3939 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3941 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3942 times. For a testcase with 42000 regs and 8000 small basic blocks,
3943 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3944 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3946 struct deps_reg *reg_last = &deps->reg_last[i];
3947 if (reg_last->uses)
3948 free_INSN_LIST_list (&reg_last->uses);
3949 if (reg_last->sets)
3950 free_INSN_LIST_list (&reg_last->sets);
3951 if (reg_last->implicit_sets)
3952 free_INSN_LIST_list (&reg_last->implicit_sets);
3953 if (reg_last->control_uses)
3954 free_INSN_LIST_list (&reg_last->control_uses);
3955 if (reg_last->clobbers)
3956 free_INSN_LIST_list (&reg_last->clobbers);
3958 CLEAR_REG_SET (&deps->reg_last_in_use);
3960 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3961 it at all. */
3962 free (deps->reg_last);
3963 deps->reg_last = NULL;
3965 deps = NULL;
3968 /* Remove INSN from dependence contexts DEPS. */
3969 void
3970 remove_from_deps (struct deps_desc *deps, rtx insn)
3972 int removed;
3973 unsigned i;
3974 reg_set_iterator rsi;
3976 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3977 &deps->pending_read_mems);
3978 if (!DEBUG_INSN_P (insn))
3979 deps->pending_read_list_length -= removed;
3980 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3981 &deps->pending_write_mems);
3982 deps->pending_write_list_length -= removed;
3984 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3985 deps->pending_flush_length -= removed;
3986 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3987 deps->pending_flush_length -= removed;
3989 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3991 struct deps_reg *reg_last = &deps->reg_last[i];
3992 if (reg_last->uses)
3993 remove_from_dependence_list (insn, &reg_last->uses);
3994 if (reg_last->sets)
3995 remove_from_dependence_list (insn, &reg_last->sets);
3996 if (reg_last->implicit_sets)
3997 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3998 if (reg_last->clobbers)
3999 remove_from_dependence_list (insn, &reg_last->clobbers);
4000 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4001 && !reg_last->clobbers)
4002 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4005 if (CALL_P (insn))
4007 remove_from_dependence_list (insn, &deps->last_function_call);
4008 remove_from_dependence_list (insn,
4009 &deps->last_function_call_may_noreturn);
4011 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4014 /* Init deps data vector. */
4015 static void
4016 init_deps_data_vector (void)
4018 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4019 if (reserve > 0 && ! h_d_i_d.space (reserve))
4020 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4023 /* If it is profitable to use them, initialize or extend (depending on
4024 GLOBAL_P) dependency data. */
4025 void
4026 sched_deps_init (bool global_p)
4028 /* Average number of insns in the basic block.
4029 '+ 1' is used to make it nonzero. */
4030 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4032 init_deps_data_vector ();
4034 /* We use another caching mechanism for selective scheduling, so
4035 we don't use this one. */
4036 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4038 /* ?!? We could save some memory by computing a per-region luid mapping
4039 which could reduce both the number of vectors in the cache and the
4040 size of each vector. Instead we just avoid the cache entirely unless
4041 the average number of instructions in a basic block is very high. See
4042 the comment before the declaration of true_dependency_cache for
4043 what we consider "very high". */
4044 cache_size = 0;
4045 extend_dependency_caches (sched_max_luid, true);
4048 if (global_p)
4050 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4051 /* Allocate lists for one block at a time. */
4052 insns_in_block);
4053 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4054 /* Allocate nodes for one block at a time.
4055 We assume that an average insn has
4056 5 producers. */
4057 5 * insns_in_block);
4062 /* Create or extend (depending on CREATE_P) dependency caches to
4063 size N. */
4064 void
4065 extend_dependency_caches (int n, bool create_p)
4067 if (create_p || true_dependency_cache)
4069 int i, luid = cache_size + n;
4071 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4072 luid);
4073 output_dependency_cache = XRESIZEVEC (bitmap_head,
4074 output_dependency_cache, luid);
4075 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4076 luid);
4077 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4078 luid);
4080 if (current_sched_info->flags & DO_SPECULATION)
4081 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4082 luid);
4084 for (i = cache_size; i < luid; i++)
4086 bitmap_initialize (&true_dependency_cache[i], 0);
4087 bitmap_initialize (&output_dependency_cache[i], 0);
4088 bitmap_initialize (&anti_dependency_cache[i], 0);
4089 bitmap_initialize (&control_dependency_cache[i], 0);
4091 if (current_sched_info->flags & DO_SPECULATION)
4092 bitmap_initialize (&spec_dependency_cache[i], 0);
4094 cache_size = luid;
4098 /* Finalize dependency information for the whole function. */
4099 void
4100 sched_deps_finish (void)
4102 gcc_assert (deps_pools_are_empty_p ());
4103 free_alloc_pool_if_empty (&dn_pool);
4104 free_alloc_pool_if_empty (&dl_pool);
4105 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4107 h_d_i_d.release ();
4108 cache_size = 0;
4110 if (true_dependency_cache)
4112 int i;
4114 for (i = 0; i < cache_size; i++)
4116 bitmap_clear (&true_dependency_cache[i]);
4117 bitmap_clear (&output_dependency_cache[i]);
4118 bitmap_clear (&anti_dependency_cache[i]);
4119 bitmap_clear (&control_dependency_cache[i]);
4121 if (sched_deps_info->generate_spec_deps)
4122 bitmap_clear (&spec_dependency_cache[i]);
4124 free (true_dependency_cache);
4125 true_dependency_cache = NULL;
4126 free (output_dependency_cache);
4127 output_dependency_cache = NULL;
4128 free (anti_dependency_cache);
4129 anti_dependency_cache = NULL;
4130 free (control_dependency_cache);
4131 control_dependency_cache = NULL;
4133 if (sched_deps_info->generate_spec_deps)
4135 free (spec_dependency_cache);
4136 spec_dependency_cache = NULL;
4142 /* Initialize some global variables needed by the dependency analysis
4143 code. */
4145 void
4146 init_deps_global (void)
4148 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4149 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4150 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4151 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4152 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4153 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4154 reg_pending_barrier = NOT_A_BARRIER;
4156 if (!sel_sched_p () || sched_emulate_haifa_p)
4158 sched_deps_info->start_insn = haifa_start_insn;
4159 sched_deps_info->finish_insn = haifa_finish_insn;
4161 sched_deps_info->note_reg_set = haifa_note_reg_set;
4162 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4163 sched_deps_info->note_reg_use = haifa_note_reg_use;
4165 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4166 sched_deps_info->note_dep = haifa_note_dep;
4170 /* Free everything used by the dependency analysis code. */
4172 void
4173 finish_deps_global (void)
4175 FREE_REG_SET (reg_pending_sets);
4176 FREE_REG_SET (reg_pending_clobbers);
4177 FREE_REG_SET (reg_pending_uses);
4178 FREE_REG_SET (reg_pending_control_uses);
4181 /* Estimate the weakness of dependence between MEM1 and MEM2. */
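/* (Clarifying note, not part of the original comment: the returned
   "weakness" grows as the dependence becomes less likely, so MIN_DEP_WEAK
   means the MEMs surely conflict and speculation should not be attempted,
   while larger values indicate that speculating past the dependence is
   more likely to succeed.)  */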
4182 dw_t
4183 estimate_dep_weak (rtx mem1, rtx mem2)
4185 rtx r1, r2;
4187 if (mem1 == mem2)
4188 /* MEMs are the same - don't speculate. */
4189 return MIN_DEP_WEAK;
4191 r1 = XEXP (mem1, 0);
4192 r2 = XEXP (mem2, 0);
4194 if (r1 == r2
4195 || (REG_P (r1) && REG_P (r2)
4196 && REGNO (r1) == REGNO (r2)))
4197 /* Again, MEMs are the same. */
4198 return MIN_DEP_WEAK;
4199 else if ((REG_P (r1) && !REG_P (r2))
4200 || (!REG_P (r1) && REG_P (r2)))
4201 /* Different addressing modes - reason to be more speculative
4202 than usual. */
4203 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4204 else
4205 /* We can't say anything about the dependence. */
4206 return UNCERTAIN_DEP_WEAK;
4209 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4210 This function can handle same INSN and ELEM (INSN == ELEM).
4211 It is a convenience wrapper. */
4212 static void
4213 add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
4215 ds_t ds;
4216 bool internal;
4218 if (dep_type == REG_DEP_TRUE)
4219 ds = DEP_TRUE;
4220 else if (dep_type == REG_DEP_OUTPUT)
4221 ds = DEP_OUTPUT;
4222 else if (dep_type == REG_DEP_CONTROL)
4223 ds = DEP_CONTROL;
4224 else
4226 gcc_assert (dep_type == REG_DEP_ANTI);
4227 ds = DEP_ANTI;
4230 /* When add_dependence is called from inside sched-deps.c, we expect
4231 cur_insn to be non-null. */
4232 internal = cur_insn != NULL;
4233 if (internal)
4234 gcc_assert (insn == cur_insn);
4235 else
4236 cur_insn = insn;
4238 note_dep (elem, ds);
4239 if (!internal)
4240 cur_insn = NULL;
4243 /* Return weakness of speculative type TYPE in the dep_status DS,
4244 without range checking, so that malformed input does not cause an ICE. */
4245 static dw_t
4246 get_dep_weak_1 (ds_t ds, ds_t type)
4248 ds = ds & type;
4250 switch (type)
4252 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4253 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4254 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4255 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4256 default: gcc_unreachable ();
4259 return (dw_t) ds;
4262 /* Return weakness of speculative type TYPE in the dep_status DS. */
4263 dw_t
4264 get_dep_weak (ds_t ds, ds_t type)
4266 dw_t dw = get_dep_weak_1 (ds, type);
4268 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4269 return dw;
4272 /* Return the dep_status, which has the same parameters as DS, except for
4273 speculative type TYPE, that will have weakness DW. */
4274 ds_t
4275 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4277 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4279 ds &= ~type;
4280 switch (type)
4282 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4283 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4284 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4285 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4286 default: gcc_unreachable ();
4288 return ds;
4291 /* Return the join of two dep_statuses DS1 and DS2.
4292 If MAX_P is true then choose the greater probability,
4293 otherwise multiply probabilities.
4294 This function assumes that both DS1 and DS2 contain speculative bits. */
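/* A worked illustration (not from the original sources; the concrete value
   of MAX_DEP_WEAK is assumed purely for the arithmetic): if MAX_DEP_WEAK
   were 100 and both statuses carried a BEGIN_DATA weakness of 50, then
   with MAX_P false the merged weakness would be 50 * 50 / 100 = 25,
   whereas with MAX_P true it would be MAX (50, 50) = 50.  Products that
   fall below MIN_DEP_WEAK are clamped to MIN_DEP_WEAK.  */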
4295 static ds_t
4296 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4298 ds_t ds, t;
4300 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4302 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4304 t = FIRST_SPEC_TYPE;
4307 if ((ds1 & t) && !(ds2 & t))
4308 ds |= ds1 & t;
4309 else if (!(ds1 & t) && (ds2 & t))
4310 ds |= ds2 & t;
4311 else if ((ds1 & t) && (ds2 & t))
4313 dw_t dw1 = get_dep_weak (ds1, t);
4314 dw_t dw2 = get_dep_weak (ds2, t);
4315 ds_t dw;
4317 if (!max_p)
4319 dw = ((ds_t) dw1) * ((ds_t) dw2);
4320 dw /= MAX_DEP_WEAK;
4321 if (dw < MIN_DEP_WEAK)
4322 dw = MIN_DEP_WEAK;
4324 else
4326 if (dw1 >= dw2)
4327 dw = dw1;
4328 else
4329 dw = dw2;
4332 ds = set_dep_weak (ds, t, (dw_t) dw);
4335 if (t == LAST_SPEC_TYPE)
4336 break;
4337 t <<= SPEC_TYPE_SHIFT;
4339 while (1);
4341 return ds;
4344 /* Return the join of two dep_statuses DS1 and DS2.
4345 This function assumes that both DS1 and DS2 contain speculative bits. */
4346 ds_t
4347 ds_merge (ds_t ds1, ds_t ds2)
4349 return ds_merge_1 (ds1, ds2, false);
4352 /* Return the join of two dep_statuses DS and DS2; MEM1 and MEM2 are used to estimate data-speculation weakness when both statuses are speculative. */
4353 ds_t
4354 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4356 ds_t new_status = ds | ds2;
4358 if (new_status & SPECULATIVE)
4360 if ((ds && !(ds & SPECULATIVE))
4361 || (ds2 && !(ds2 & SPECULATIVE)))
4362 /* Then this dep can't be speculative. */
4363 new_status &= ~SPECULATIVE;
4364 else
4366 /* Both are speculative. Merge the probabilities. */
4367 if (mem1)
4369 dw_t dw;
4371 dw = estimate_dep_weak (mem1, mem2);
4372 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4375 if (!ds)
4376 new_status = ds2;
4377 else if (!ds2)
4378 new_status = ds;
4379 else
4380 new_status = ds_merge (ds2, ds);
4384 return new_status;
4387 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4388 probabilities. */
4389 ds_t
4390 ds_max_merge (ds_t ds1, ds_t ds2)
4392 if (ds1 == 0 && ds2 == 0)
4393 return 0;
4395 if (ds1 == 0 && ds2 != 0)
4396 return ds2;
4398 if (ds1 != 0 && ds2 == 0)
4399 return ds1;
4401 return ds_merge_1 (ds1, ds2, true);
4404 /* Return the probability of speculation success for the speculation
4405 status DS. */
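/* Illustration (assuming, only for the arithmetic, MAX_DEP_WEAK == 100):
   a status with BEGIN_DATA weakness 50 and BEGIN_CONTROL weakness 80
   yields 50 * 80 / 100 = 40; the per-type probabilities are multiplied,
   renormalized by MAX_DEP_WEAK for every extra factor, and clamped to at
   least MIN_DEP_WEAK.  */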
4406 dw_t
4407 ds_weak (ds_t ds)
4409 ds_t res = 1, dt;
4410 int n = 0;
4412 dt = FIRST_SPEC_TYPE;
4415 if (ds & dt)
4417 res *= (ds_t) get_dep_weak (ds, dt);
4418 n++;
4421 if (dt == LAST_SPEC_TYPE)
4422 break;
4423 dt <<= SPEC_TYPE_SHIFT;
4425 while (1);
4427 gcc_assert (n);
4428 while (--n)
4429 res /= MAX_DEP_WEAK;
4431 if (res < MIN_DEP_WEAK)
4432 res = MIN_DEP_WEAK;
4434 gcc_assert (res <= MAX_DEP_WEAK);
4436 return (dw_t) res;
4439 /* Return a dep status that contains all speculation types of DS. */
4440 ds_t
4441 ds_get_speculation_types (ds_t ds)
4443 if (ds & BEGIN_DATA)
4444 ds |= BEGIN_DATA;
4445 if (ds & BE_IN_DATA)
4446 ds |= BE_IN_DATA;
4447 if (ds & BEGIN_CONTROL)
4448 ds |= BEGIN_CONTROL;
4449 if (ds & BE_IN_CONTROL)
4450 ds |= BE_IN_CONTROL;
4452 return ds & SPECULATIVE;
4455 /* Return a dep status that contains maximal weakness for each speculation
4456 type present in DS. */
4457 ds_t
4458 ds_get_max_dep_weak (ds_t ds)
4460 if (ds & BEGIN_DATA)
4461 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4462 if (ds & BE_IN_DATA)
4463 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4464 if (ds & BEGIN_CONTROL)
4465 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4466 if (ds & BE_IN_CONTROL)
4467 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4469 return ds;
4472 /* Dump information about the dependence status S. */
4473 static void
4474 dump_ds (FILE *f, ds_t s)
4476 fprintf (f, "{");
4478 if (s & BEGIN_DATA)
4479 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4480 if (s & BE_IN_DATA)
4481 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4482 if (s & BEGIN_CONTROL)
4483 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4484 if (s & BE_IN_CONTROL)
4485 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4487 if (s & HARD_DEP)
4488 fprintf (f, "HARD_DEP; ");
4490 if (s & DEP_TRUE)
4491 fprintf (f, "DEP_TRUE; ");
4492 if (s & DEP_OUTPUT)
4493 fprintf (f, "DEP_OUTPUT; ");
4494 if (s & DEP_ANTI)
4495 fprintf (f, "DEP_ANTI; ");
4496 if (s & DEP_CONTROL)
4497 fprintf (f, "DEP_CONTROL; ");
4499 fprintf (f, "}");
4502 DEBUG_FUNCTION void
4503 debug_ds (ds_t s)
4505 dump_ds (stderr, s);
4506 fprintf (stderr, "\n");
4509 #ifdef ENABLE_CHECKING
4510 /* Verify that dependence type and status are consistent.
4511 If RELAXED_P is true, then skip dep_weakness checks. */
4512 static void
4513 check_dep (dep_t dep, bool relaxed_p)
4515 enum reg_note dt = DEP_TYPE (dep);
4516 ds_t ds = DEP_STATUS (dep);
4518 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4520 if (!(current_sched_info->flags & USE_DEPS_LIST))
4522 gcc_assert (ds == 0);
4523 return;
4526 /* Check that dependence type contains the same bits as the status. */
4527 if (dt == REG_DEP_TRUE)
4528 gcc_assert (ds & DEP_TRUE);
4529 else if (dt == REG_DEP_OUTPUT)
4530 gcc_assert ((ds & DEP_OUTPUT)
4531 && !(ds & DEP_TRUE));
4532 else if (dt == REG_DEP_ANTI)
4533 gcc_assert ((ds & DEP_ANTI)
4534 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4535 else
4536 gcc_assert (dt == REG_DEP_CONTROL
4537 && (ds & DEP_CONTROL)
4538 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4540 /* HARD_DEP can not appear in dep_status of a link. */
4541 gcc_assert (!(ds & HARD_DEP));
4543 /* Check that dependence status is set correctly when speculation is not
4544 supported. */
4545 if (!sched_deps_info->generate_spec_deps)
4546 gcc_assert (!(ds & SPECULATIVE));
4547 else if (ds & SPECULATIVE)
4549 if (!relaxed_p)
4551 ds_t type = FIRST_SPEC_TYPE;
4553 /* Check that dependence weakness is in proper range. */
4556 if (ds & type)
4557 get_dep_weak (ds, type);
4559 if (type == LAST_SPEC_TYPE)
4560 break;
4561 type <<= SPEC_TYPE_SHIFT;
4563 while (1);
4566 if (ds & BEGIN_SPEC)
4568 /* Only true dependence can be data speculative. */
4569 if (ds & BEGIN_DATA)
4570 gcc_assert (ds & DEP_TRUE);
4572 /* Control dependencies in the insn scheduler are represented by
4573 anti-dependencies, therefore only anti dependence can be
4574 control speculative. */
4575 if (ds & BEGIN_CONTROL)
4576 gcc_assert (ds & DEP_ANTI);
4578 else
4580 /* Subsequent speculations should resolve true dependencies. */
4581 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4584 /* Check that true and anti dependencies can't have other speculative
4585 statuses. */
4586 if (ds & DEP_TRUE)
4587 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4588 /* An output dependence can't be speculative at all. */
4589 gcc_assert (!(ds & DEP_OUTPUT));
4590 if (ds & DEP_ANTI)
4591 gcc_assert (ds & BEGIN_CONTROL);
4594 #endif /* ENABLE_CHECKING */
4596 /* The following code discovers opportunities to switch a memory reference
4597 and an increment by modifying the address. We ensure that this is done
4598 only for dependencies that are only used to show a single register
4599 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4600 instruction involved is subject to only one dep that can cause a pattern
4601 change.
4603 When we discover a suitable dependency, we fill in the dep_replacement
4604 structure to show how to modify the memory reference. */
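/* A sketch of the idea (illustrative example, not from the original
   comment): if one insn loads from (mem (reg R)) and a later insn
   performs R = R + 4, the anti dependence between them can be made
   breakable by recording a replacement that rewrites the load as
   (mem (plus (reg R) (const_int -4))).  Should the scheduler later decide
   to move the increment above the load, applying the replacement keeps
   the load reading from the original location.  */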
4606 /* Holds information about a pair of memory reference and register increment
4607 insns which depend on each other, but could possibly be interchanged. */
4608 struct mem_inc_info
4610 rtx inc_insn;
4611 rtx mem_insn;
4613 rtx *mem_loc;
4614 /* A register occurring in the memory address for which we wish to break
4615 the dependence. This must be identical to the destination register of
4616 the increment. */
4617 rtx mem_reg0;
4618 /* Any kind of index that is added to that register. */
4619 rtx mem_index;
4620 /* The constant offset used in the memory address. */
4621 HOST_WIDE_INT mem_constant;
4622 /* The constant added in the increment insn. Negated if the increment is
4623 after the memory address. */
4624 HOST_WIDE_INT inc_constant;
4625 /* The source register used in the increment. May be different from mem_reg0
4626 if the increment occurs before the memory address. */
4627 rtx inc_input;
4630 /* Verify that the memory location described in MII can be replaced with
4631 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4632 insn remains unchanged by this function. */
4634 static rtx
4635 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4637 rtx mem = *mii->mem_loc;
4638 rtx new_mem;
4640 /* Jump through a lot of hoops to keep the attributes up to date. We
4641 do not want to call one of the change address variants that take
4642 an offset even though we know the offset in many cases. These
4643 assume you are changing where the address is pointing by the
4644 offset. */
4645 new_mem = replace_equiv_address_nv (mem, new_addr);
4646 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4648 if (sched_verbose >= 5)
4649 fprintf (sched_dump, "validation failure\n");
4650 return NULL_RTX;
4653 /* Put back the old one. */
4654 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4656 return new_mem;
4659 /* Return true if INSN is of a form "a = b op c" where a and b are
4660 regs. op is + if c is a reg and +|- if c is a const. Fill in
4661 information in MII about what is found.
4662 BEFORE_MEM indicates whether the increment is found before or after
4663 a corresponding memory reference. */
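/* Concretely (a sketch of what the checks below accept): the insn must be
   a single set whose pattern looks like
   (set (reg R) (plus (reg R') (const_int N)))
   where the destination equals the register recorded in mem_reg0 and the
   addend is a CONST_INT; a decrement is simply a negative N.  */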
4665 static bool
4666 parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
4668 rtx pat = single_set (insn);
4669 rtx src, cst;
4670 bool regs_equal;
4672 if (RTX_FRAME_RELATED_P (insn) || !pat)
4673 return false;
4675 /* Result must be single reg. */
4676 if (!REG_P (SET_DEST (pat)))
4677 return false;
4679 if (GET_CODE (SET_SRC (pat)) != PLUS)
4680 return false;
4682 mii->inc_insn = insn;
4683 src = SET_SRC (pat);
4684 mii->inc_input = XEXP (src, 0);
4686 if (!REG_P (XEXP (src, 0)))
4687 return false;
4689 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4690 return false;
4692 cst = XEXP (src, 1);
4693 if (!CONST_INT_P (cst))
4694 return false;
4695 mii->inc_constant = INTVAL (cst);
4697 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4699 if (!before_mem)
4701 mii->inc_constant = -mii->inc_constant;
4702 if (!regs_equal)
4703 return false;
4706 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4708 /* Note that the sign has already been reversed for !before_mem. */
4709 #ifdef STACK_GROWS_DOWNWARD
4710 return mii->inc_constant > 0;
4711 #else
4712 return mii->inc_constant < 0;
4713 #endif
4715 return true;
4718 /* Once a suitable mem reference has been found and the corresponding data
4719 in MII has been filled in, this function is called to find a suitable
4720 add or inc insn involving the register we found in the memory
4721 reference. */
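/* Informal summary of the search below: with BACKWARDS set, increment
   candidates are the producers on the memory insn's hard backward
   dependence list (increment before the memory reference); otherwise they
   are the consumers on its forward list (increment after it).  */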
4723 static bool
4724 find_inc (struct mem_inc_info *mii, bool backwards)
4726 sd_iterator_def sd_it;
4727 dep_t dep;
4729 sd_it = sd_iterator_start (mii->mem_insn,
4730 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4731 while (sd_iterator_cond (&sd_it, &dep))
4733 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4734 rtx pro = DEP_PRO (dep);
4735 rtx con = DEP_CON (dep);
4736 rtx inc_cand = backwards ? pro : con;
4737 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4738 goto next;
4739 if (parse_add_or_inc (mii, inc_cand, backwards))
4741 struct dep_replacement *desc;
4742 df_ref *def_rec;
4743 rtx newaddr, newmem;
4745 if (sched_verbose >= 5)
4746 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4747 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4749 /* Need to ensure that none of the operands of the inc
4750 instruction are assigned to by the mem insn. */
4751 for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
4753 df_ref def = *def_rec;
4754 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4755 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4757 if (sched_verbose >= 5)
4758 fprintf (sched_dump,
4759 "inc conflicts with store failure.\n");
4760 goto next;
4763 newaddr = mii->inc_input;
4764 if (mii->mem_index != NULL_RTX)
4765 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4766 mii->mem_index);
4767 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4768 mii->mem_constant + mii->inc_constant);
4769 newmem = attempt_change (mii, newaddr);
4770 if (newmem == NULL_RTX)
4771 goto next;
4772 if (sched_verbose >= 5)
4773 fprintf (sched_dump, "successful address replacement\n");
4774 desc = XCNEW (struct dep_replacement);
4775 DEP_REPLACE (dep) = desc;
4776 desc->loc = mii->mem_loc;
4777 desc->newval = newmem;
4778 desc->orig = *desc->loc;
4779 desc->insn = mii->mem_insn;
4780 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4781 INSN_SPEC_BACK_DEPS (con));
4782 if (backwards)
4784 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4785 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4786 REG_DEP_TRUE);
4788 else
4790 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4791 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4792 REG_DEP_ANTI);
4794 return true;
4796 next:
4797 sd_iterator_next (&sd_it);
4799 return false;
4802 /* A recursive function that walks ADDRESS_OF_X to find memory references
4803 which could be modified during scheduling. We call find_inc for each
4804 one we find that has a recognizable form. MII holds information about
4805 the pair of memory/increment instructions.
4806 We ensure that every instruction with a memory reference (which will be
4807 the location of the replacement) is assigned at most one breakable
4808 dependency. */
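/* The address shapes recognized below are, roughly: (mem (reg R)),
   (mem (plus (reg R) (const_int C))), (mem (plus (reg R) X)) and
   (mem (plus (plus (reg R) X) (const_int C))); they are decomposed into
   mem_reg0 = R, mem_index = X and mem_constant = C.  (Summary derived
   from the code that follows, added for clarity.)  */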
4810 static bool
4811 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4813 rtx x = *address_of_x;
4814 enum rtx_code code = GET_CODE (x);
4815 const char *const fmt = GET_RTX_FORMAT (code);
4816 int i;
4818 if (code == MEM)
4820 rtx reg0 = XEXP (x, 0);
4822 mii->mem_loc = address_of_x;
4823 mii->mem_index = NULL_RTX;
4824 mii->mem_constant = 0;
4825 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4827 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4828 reg0 = XEXP (reg0, 0);
4830 if (GET_CODE (reg0) == PLUS)
4832 mii->mem_index = XEXP (reg0, 1);
4833 reg0 = XEXP (reg0, 0);
4835 if (REG_P (reg0))
4837 df_ref *def_rec;
4838 int occurrences = 0;
4840 /* Make sure this reg appears only once in this insn. Can't use
4841 count_occurrences since that only works for pseudos. */
4842 for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
4844 df_ref def = *def_rec;
4845 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def)))
4846 if (++occurrences > 1)
4848 if (sched_verbose >= 5)
4849 fprintf (sched_dump, "mem count failure\n");
4850 return false;
4854 mii->mem_reg0 = reg0;
4855 return find_inc (mii, true) || find_inc (mii, false);
4857 return false;
4860 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4862 /* If REG occurs inside a MEM used in a bit-field reference,
4863 that is unacceptable. */
4864 return false;
4867 /* Time for some deep diving. */
4868 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4870 if (fmt[i] == 'e')
4872 if (find_mem (mii, &XEXP (x, i)))
4873 return true;
4875 else if (fmt[i] == 'E')
4877 int j;
4878 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4879 if (find_mem (mii, &XVECEXP (x, i, j)))
4880 return true;
4883 return false;
4887 /* Examine the instructions between HEAD and TAIL and try to find
4888 dependencies that can be broken by modifying one of the patterns. */
4890 void
4891 find_modifiable_mems (rtx head, rtx tail)
4893 rtx insn, next_tail = NEXT_INSN (tail);
4894 int success_in_block = 0;
4896 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4898 struct mem_inc_info mii;
4900 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4901 continue;
4903 mii.mem_insn = insn;
4904 if (find_mem (&mii, &PATTERN (insn)))
4905 success_in_block++;
4907 if (success_in_block && sched_verbose >= 5)
4908 fprintf (sched_dump, "%d candidates for address modification found.\n",
4909 success_in_block);
4912 #endif /* INSN_SCHEDULING */