/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependence lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   insn in its constraint has one register class.  Even if there is
   currently no hard register in the insn, the particular hard
   register will be in the insn after reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  When the bitmaps for the true dependency cache are
   allocated, the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
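
/* Illustrative sketch (an assumption drawn from how the caches are used
   below, not a quote from the original sources): each cache is an array
   of bitmaps indexed by the consumer's luid, with one bit per potential
   producer.  Probing for an already-recorded true dependence PRO -> CON
   looks like:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		       INSN_LUID (pro)))
       ...a true dependence PRO -> CON is already recorded...  */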
/* True if we should mark added dependencies as a non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx, rtx, enum reg_note);
static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
					  rtx *, int, enum reg_note, bool);
static void delete_all_dependences (rtx);
static void chain_to_prev_insn (rtx);

static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
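
/* For illustration (a sketch, not from the original sources), the two
   shapes recognized above are:

     (cond_exec (ne (reg:CC cc) (const_int 0)) (set ...))
       -- return the COND_EXEC test unchanged, and

     (set (pc) (if_then_else (eq (reg:CC cc) (const_int 0))
			     (label_ref ...) (pc)))
       -- return the comparison; when the taken arm is PC (i.e. the jump
	  happens on the *false* condition), the comparison must be
	  reversed, which is what *REV reports.  */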
/* Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const_rtx insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	  (rev1 == rev2
	   ? reversed_comparison_code (cond2, NULL)
	   : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
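
/* Illustrative example (not from the original sources): two predicated
   insns guarded by complementary tests of the same register, e.g.

     (cond_exec (eq (reg:CC cc) (const_int 0)) (set (reg 100) ...))
     (cond_exec (ne (reg:CC cc) (const_int 0)) (set (reg 100) ...))

   can never both execute, so no dependence needs to be recorded between
   them -- provided neither insn modifies the condition register itself,
   which is what the modified_in_p checks above guarantee.  */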
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
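
/* Usage sketch for the iterator machinery above (illustrative, not from
   the original sources): walking all unresolved backward dependencies of
   an insn combines both back lists:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       process (DEP_PRO (dep));

   SD_LIST_BACK is SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK; sd_next_list
   is what switches the iterator from one underlying deps_list to the
   next when the current one is exhausted.  */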
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}
808 /* ??? It would be nice to allocate dependency caches here. */
811 /* Free data for INSN. */
813 sd_finish_insn (rtx insn
)
815 /* ??? It would be nice to deallocate dependency caches here. */
817 free_deps_list (INSN_HARD_BACK_DEPS (insn
));
818 INSN_HARD_BACK_DEPS (insn
) = NULL
;
820 free_deps_list (INSN_SPEC_BACK_DEPS (insn
));
821 INSN_SPEC_BACK_DEPS (insn
) = NULL
;
823 free_deps_list (INSN_RESOLVED_BACK_DEPS (insn
));
824 INSN_RESOLVED_BACK_DEPS (insn
) = NULL
;
826 free_deps_list (INSN_FORW_DEPS (insn
));
827 INSN_FORW_DEPS (insn
) = NULL
;
829 free_deps_list (INSN_RESOLVED_FORW_DEPS (insn
));
830 INSN_RESOLVED_FORW_DEPS (insn
) = NULL
;
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer is the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
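
/* Illustrative summary (not a quote from the original sources) of the
   contract between ask_dependency_caches and its caller:

     switch (ask_dependency_caches (dep))
       {
       case DEP_CREATED:  no matching dep exists; just add DEP;
       case DEP_PRESENT:  an equal-or-stronger dep exists; do nothing;
       case DEP_CHANGED:  a weaker dep exists; find it in the lists and
			  merge DEP into it via update_dep.
       }  */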
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep; it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
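
/* Usage sketch (illustrative, not from the original sources): callers
   typically build the dep on the stack and hand it to sd_add_dep, which
   copies it into a pooled dep_node:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   Compare haifa_note_dep further below, which instead goes through
   maybe_add_or_update_dep_1 so that an equivalent existing dependence
   is merged rather than duplicated.  */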
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx con, rtx pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx real_pro = pro;
      rtx other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */
static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
		     bool hard)
{
  mark_as_hard = hard;
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
  mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */
static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx *listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
    }

  return removed;
}
/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
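
/* Illustrative example (not from the original sources) of the four kinds
   for a chain of insns touching the same location X:

     insn 1: (set (reg 100) (mem X))  ;; read X
     insn 2: (set (reg 101) (mem X))  ;; read X  -- (0) read dep on 1
     insn 3: (set (mem X) (reg 102))  ;; write X -- (3) anti dep on 1 and 2
     insn 4: (set (reg 103) (mem X))  ;; read X  -- (1) true dep on 3
     insn 5: (set (mem X) (reg 104))  ;; write X -- (2) output dep on 3,
				      ;;            (3) anti dep on 4  */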
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
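
/* Illustrative note (an assumption drawn from the callers elsewhere in
   this file, not a quote): the pending lists are flushed where they would
   otherwise grow without bound -- e.g. at calls, barriers and volatile
   asms, or once the pending list lengths exceed the scheduler's pending
   list limit -- after which last_pending_memory_flush alone summarizes
   all earlier memory references.  */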
/* Instruction whose dependencies we are analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
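
/* Illustrative sketch (not from the original sources): the next_regno_use
   links form a cycle through all uses of the same regno, so starting from
   any reg_use_data the scheduler can visit every insn that uses the
   register:

     struct reg_use_data *u = use;
     do
       {
	 visit (u->insn);
	 u = u->next_regno_use;
       }
     while (u != use);  */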
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
void
init_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						    * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  */
static bool can_start_lhs_rhs_p;

/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}

/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
                   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
        {
          while (--i >= 0)
            note_reg_set (regno + i);
        }
      else if (ref == USE)
        {
          while (--i >= 0)
            note_reg_use (regno + i);
        }
      else
        {
          while (--i >= 0)
            note_reg_clobber (regno + i);
        }
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
        note_reg_set (regno);
      else if (ref == USE)
        note_reg_use (regno);
      else
        note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
         the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
        {
          rtx t = get_reg_known_value (regno);
          if (MEM_P (t))
            sched_analyze_2 (deps, XEXP (t, 0), insn);
        }

      /* Don't let it cross a call after scheduling if it doesn't
         already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
        {
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          else
            add_dependence_list (insn, deps->last_function_call, 1,
                                 REG_DEP_ANTI, false);
        }
    }
}
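/* Editor's illustration (not from the original sources): on a target with
   32-bit hard registers, (reg:DI 0) spans hard_regno_nregs[0][DImode] == 2
   hard registers, so a SET of it above calls note_reg_set (0) and
   note_reg_set (1), keeping both halves visible to the dependence lists.  */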
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (cslr_p && sched_deps_info->finish_lhs)
        sched_deps_info->finish_lhs ();

      if (code == SET)
        {
          can_start_lhs_rhs_p = cslr_p;

          sched_analyze_2 (deps, SET_SRC (x), insn);

          can_start_lhs_rhs_p = false;
        }

      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || df_read_modify_subreg_p (dest))
        {
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

          sched_analyze_2 (deps, XEXP (dest, 0), insn);
        }
      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
        {
          /* Avoid analyzing the same register twice.  */
          if (regno != FIRST_STACK_REG)
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

          add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
                               FIRST_STACK_REG);
        }
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
        {
          enum machine_mode address_mode = get_address_mode (dest);

          t = shallow_copy_rtx (dest);
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                   GET_MODE (t), insn);
          XEXP (t, 0)
            = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
                                                insn);
        }
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
              > MAX_PENDING_LIST_LENGTH))
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
                              DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
                              DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_CONTROL, true);

          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
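/* Editor's illustration (not from the original sources): for the pair

     insn 1:  (set (mem:SI (reg A)) (reg X))   ; enters pending_write_mems
     insn 2:  (set (mem:SI (reg B)) (reg Y))

   the pending-write loop above asks output_dependence whether the two
   stores may overlap; if they may, insn 2 gains an output dependence
   (DEP_OUTPUT) on insn 1 via note_mem_dep.  */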
/* Analyze the uses of memory and registers in rtx X in INSN.  */
static void
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (x);
  if (x == 0)
    return;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
         same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        enum machine_mode mode = GET_MODE (x);

        sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
        /* Treat all reads of a stack register as modifying the TOS.  */
        if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
          {
            /* Avoid analyzing the same register twice.  */
            if (regno != FIRST_STACK_REG)
              sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
          }
#endif

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;
        rtx t = x;

        if (sched_deps_info->use_cselib)
          {
            enum machine_mode address_mode = get_address_mode (t);

            t = shallow_copy_rtx (t);
            cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                     GET_MODE (t), insn);
            XEXP (t, 0)
              = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
                                                  insn);
          }

        if (!DEBUG_INSN_P (insn))
          {
            t = canon_rtx (t);
            pending = deps->pending_read_insns;
            pending_mem = deps->pending_read_mems;
            while (pending)
              {
                if (read_dependence (XEXP (pending_mem, 0), t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         XEXP (pending, 0)))
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
                                DEP_ANTI);

                pending = XEXP (pending, 1);
                pending_mem = XEXP (pending_mem, 1);
              }

            pending = deps->pending_write_insns;
            pending_mem = deps->pending_write_mems;
            while (pending)
              {
                if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         XEXP (pending, 0)))
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
                                sched_deps_info->generate_spec_deps
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

                pending = XEXP (pending, 1);
                pending_mem = XEXP (pending_mem, 1);
              }

            for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

            for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
              if (deps_may_trap_p (x))
                {
                  if ((sched_deps_info->generate_spec_deps)
                      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
                    {
                      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                              MAX_DEP_WEAK);

                      note_dep (XEXP (u, 0), ds);
                    }
                  else
                    add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
                }
          }

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        if (!deps->readonly)
          {
            if ((deps->pending_read_list_length
                 + deps->pending_write_list_length)
                > MAX_PENDING_LIST_LENGTH
                && !DEBUG_INSN_P (insn))
              flush_pending_lists (deps, insn, true, true);
            add_insn_mem_dependence (deps, true, insn, x);
          }

        sched_analyze_2 (deps, XEXP (x, 0), insn);

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
        reg_pending_barrier = TRUE_BARRIER;
      /* A prefetch insn contains addresses only.  So if the prefetch
         address has no registers, there will be no dependencies on
         the prefetch insn.  This is wrong from the correctness point
         of view, as such a prefetch can be moved below a jump insn,
         which usually generates a MOVE_BARRIER preventing insns
         containing registers or memories from moving through the
         barrier.  It is also wrong from the performance point of
         view, as a prefetch without dependencies will tend to be
         issued later instead of earlier.  It is hard to generate
         accurate dependencies for prefetch insns, as a prefetch has
         only the start address, but it is better to have something
         than nothing.  */
      if (!deps->readonly)
        {
          rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
          if (sched_deps_info->use_cselib)
            cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
          add_insn_mem_dependence (deps, true, insn, x);
        }
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */

    case ASM_OPERANDS:
    case ASM_INPUT:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
            && !DEBUG_INSN_P (insn))
          reg_pending_barrier = TRUE_BARRIER;

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We cannot just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

            if (cslr_p && sched_deps_info->finish_rhs)
              sched_deps_info->finish_rhs ();

            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}
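/* Editor's illustration (not from the original sources): an address like
   (mem:SI (post_inc:SI (reg R))) both reads R (as the address) and writes
   R (the increment).  That is why the POST_INC case above hands X to
   sched_analyze_2 first, for the read, and then to sched_analyze_1, for
   the write.  */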
/* Try to group two fusible insns together to prevent the scheduler
   from scheduling them apart.  */

static void
sched_macro_fuse_insns (rtx insn)
{
  rtx prev;

  if (any_condjump_p (insn))
    {
      unsigned int condreg1, condreg2;
      rtx cc_reg_1;
      targetm.fixed_condition_code_regs (&condreg1, &condreg2);
      cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
      prev = prev_nonnote_nondebug_insn (insn);
      if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
          || !prev
          || !modified_in_p (cc_reg_1, prev))
        return;
    }
  else
    {
      rtx insn_set = single_set (insn);

      prev = prev_nonnote_nondebug_insn (insn);
      if (!prev
          || !insn_set
          || !single_set (prev)
          || !modified_in_p (SET_DEST (insn_set), prev))
        return;
    }

  if (targetm.sched.macro_fusion_pair_p (prev, insn))
    SCHED_GROUP_P (insn) = 1;
}
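/* Editor's illustration (not from the original sources): on a target whose
   macro_fusion_pair_p hook accepts compare-and-branch pairs, the sequence

     insn 1:  (set (reg:CCZ flags) (compare ...))
     insn 2:  (set (pc) (if_then_else (eq (reg:CCZ flags) (const_int 0)) ...))

   leaves SCHED_GROUP_P set on insn 2, so the scheduler keeps the pair
   adjacent and the hardware can fuse it into a single macro-op.  */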
/* Analyze an INSN with pattern X to find all dependencies.  */
static void
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (! reload_completed)
    {
      HARD_REG_SET temp;

      extract_insn (insn);
      preprocess_constraints (insn);
      ira_implicitly_set_insn_hard_regs (&temp);
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
    }

  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
                         && code == SET);

  /* Group compare and branch insns for macro-fusion.  */
  if (targetm.sched.macro_fusion_p
      && targetm.sched.macro_fusion_p ())
    sched_macro_fuse_insns (insn);

  if (may_trap_p (x))
    /* Avoid moving trapping instructions across function calls that might
       not always return.  */
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
                         1, REG_DEP_ANTI, true);

  /* We must avoid creating a situation in which two successors of the
     current block have different unwind info after scheduling.  If at any
     point the two paths re-join this leads to incorrect unwind info.  */
  /* ??? There are certain situations involving a forced frame pointer in
     which, with extra effort, we could fix up the unwind info at a later
     CFG join.  However, it seems better to notice these cases earlier
     during prologue generation and avoid marking the frame pointer setup
     as frame-related at all.  */
  if (RTX_FRAME_RELATED_P (insn))
    {
      /* Make sure prologue insn is scheduled before next jump.  */
      deps->sched_before_next_jump
        = alloc_INSN_LIST (insn, deps->sched_before_next_jump);

      /* Make sure epilogue insn is scheduled after preceding jumps.  */
      add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
                           true);
    }

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, 1,
                             REG_DEP_OUTPUT, true);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else if (GET_CODE (XEXP (link, 0)) != SET)
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      /* Don't schedule anything after a tail call; a tail call needs
         to use at least all call-saved registers.  */
      if (SIBLING_CALL_P (insn))
        reg_pending_barrier = TRUE_BARRIER;
      else if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = MOVE_BARRIER;
    }

  if (JUMP_P (insn))
    {
      rtx next;
      next = next_nonnote_nondebug_insn (insn);
      if (next && BARRIER_P (next))
        reg_pending_barrier = MOVE_BARRIER;
      else
        {
          rtx pending, pending_mem;

          if (sched_deps_info->compute_jump_reg_dependencies)
            {
              (*sched_deps_info->compute_jump_reg_dependencies)
                (insn, reg_pending_control_uses);

              /* Make latency of jump equal to 0 by using anti-dependence.  */
              EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
                {
                  struct deps_reg *reg_last = &deps->reg_last[i];
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
                                       false);
                  add_dependence_list (insn, reg_last->implicit_sets,
                                       0, REG_DEP_ANTI, false);
                  add_dependence_list (insn, reg_last->clobbers, 0,
                                       REG_DEP_ANTI, false);
                }
            }

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_ANTI, true);
        }
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      setup_insn_reg_uses (deps, insn);
      init_insn_reg_pressure_info (insn);
    }

  /* Add register dependencies for insn.  */
  if (DEBUG_INSN_P (insn))
    {
      rtx prev = deps->last_debug_insn;
      rtx u;

      if (!deps->readonly)
        deps->last_debug_insn = insn;

      if (prev)
        add_dependence (insn, prev, REG_DEP_ANTI);

      add_dependence_list (insn, deps->last_function_call, 1,
                           REG_DEP_ANTI, false);

      if (!sel_sched_p ())
        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
          /* There's no point in making REG_DEP_CONTROL dependencies for
             debug insns.  */
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
                               false);

          if (!deps->readonly)
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
        }
      CLEAR_REG_SET (reg_pending_uses);

      /* Quite often, a debug insn will refer to stuff in the
         previous instruction, but the reason we want this
         dependency here is to make sure the scheduler doesn't
         gratuitously move a debug insn ahead.  This could dirty
         DF flags and cause additional analysis that wouldn't have
         occurred in compilation without debug insns, and such
         additional analysis can modify the generated code.  */
      prev = PREV_INSN (insn);

      if (prev && NONDEBUG_INSN_P (prev))
        add_dependence (insn, prev, REG_DEP_ANTI);
    }
  else
    {
      regset_head set_or_clobbered;

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
                               false);
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                               false);

          if (!deps->readonly)
            {
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            }
        }

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
            add_dependence_list (insn, reg_last->implicit_sets, 0,
                                 REG_DEP_ANTI, false);
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                                 false);

            if (!deps->readonly)
              {
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
                reg_last->uses_length++;
              }
          }

      if (targetm.sched.exposed_pipeline)
        {
          INIT_REG_SET (&set_or_clobbered);
          bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
                      reg_pending_sets);
          EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              rtx list;
              for (list = reg_last->uses; list; list = XEXP (list, 1))
                {
                  rtx other = XEXP (list, 0);
                  if (INSN_CACHED_COND (other) != const_true_rtx
                      && refers_to_regno_p (i, i + 1,
                                            INSN_CACHED_COND (other), NULL))
                    INSN_CACHED_COND (other) = const_true_rtx;
                }
            }
        }

      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                {
                  reg_last->clobbers
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
                  reg_last->clobbers_length++;
                }
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                                REG_DEP_OUTPUT, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->implicit_sets, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->control_uses, 0,
                                                REG_DEP_ANTI, false);
                  add_dependence_list_and_free (deps, insn,
                                                &reg_last->clobbers, 0,
                                                REG_DEP_OUTPUT, false);

                  if (!deps->readonly)
                    {
                      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                      reg_last->clobbers_length = 0;
                      reg_last->uses_length = 0;
                    }
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                       false);
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
                                       REG_DEP_ANTI, false);
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                       false);
                  add_dependence_list (insn, reg_last->control_uses, 0,
                                       REG_DEP_CONTROL, false);
                }

              if (!deps->readonly)
                {
                  reg_last->clobbers_length++;
                  reg_last->clobbers
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
                }
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];

              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            REG_DEP_OUTPUT, false);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets,
                                            0, REG_DEP_ANTI, false);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            REG_DEP_OUTPUT, false);
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                {
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }
      if (!deps->readonly)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              reg_last->control_uses
                = alloc_INSN_LIST (insn, reg_last->control_uses);
            }
        }
    }

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
      {
        struct deps_reg *reg_last = &deps->reg_last[i];
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
                             false);

        if (!deps->readonly)
          reg_last->implicit_sets
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
      }

  if (!deps->readonly)
    {
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
            || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      /* Set up the pending barrier found.  */
      deps->last_reg_pending_barrier = reg_pending_barrier;
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_control_uses);
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of barrier the most added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   true);
              add_dependence_list (insn, reg_last->sets, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->clobbers, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->control_uses, 0,
                                            REG_DEP_CONTROL, true);
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);

              if (!deps->readonly)
                {
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }

      if (!deps->readonly)
        for (i = 0; i < (unsigned) deps->max_reg; i++)
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          }

      /* Don't flush pending lists on speculative checks for
         selective scheduling.  */
      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
        flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        {
          if (DEBUG_INSN_P (insn))
            /* We don't want to mark debug insns as part of the same
               sched group.  We know they really aren't, but if we use
               debug insns to tell that a call group is over, we'll
               get different code if debug insns are not there and
               instructions that follow seem like they should be part
               of the call group.

               Also, if we did, chain_to_prev_insn would move the
               deps of the debug insn to the call insn, modifying
               non-debug post-dependency counts of the debug insn
               dependencies and otherwise messing with the scheduling
               order.

               Instead, let such debug insns be scheduled freely, but
               keep the call group open in case there are insns that
               should be part of it afterwards.  Since we grant debug
               insns higher priority than even sched group insns, it
               will all turn out all right.  */
            goto debug_dont_end_call_group;
          else
            goto end_call_group;
        }

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          if (!deps->readonly
              && deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          if (!sel_sched_p () || sched_emulate_haifa_p)
            {
              SCHED_GROUP_P (insn) = 1;
              CANT_MOVE (insn) = 1;
            }
        }
      else
        {
        end_call_group:
          if (!deps->readonly)
            deps->in_post_call_group_p = not_post_call;
        }
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
        sel_mark_hard_insn (insn);
      else
        {
          sd_iterator_def sd_it;
          dep_t dep;

          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
               sd_iterator_cond (&sd_it, &dep);)
            change_spec_dep_to_hard (sd_it);
        }
    }

  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor their original ordering.  */
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
        add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      deps->last_args_size = insn;
    }
}
/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
        {
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
              == BUILT_IN_NORMAL)
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
              {
              case BUILT_IN_BCMP:
              case BUILT_IN_BCOPY:
              case BUILT_IN_BZERO:
              case BUILT_IN_INDEX:
              case BUILT_IN_MEMCHR:
              case BUILT_IN_MEMCMP:
              case BUILT_IN_MEMCPY:
              case BUILT_IN_MEMMOVE:
              case BUILT_IN_MEMPCPY:
              case BUILT_IN_MEMSET:
              case BUILT_IN_RINDEX:
              case BUILT_IN_STPCPY:
              case BUILT_IN_STPNCPY:
              case BUILT_IN_STRCAT:
              case BUILT_IN_STRCHR:
              case BUILT_IN_STRCMP:
              case BUILT_IN_STRCPY:
              case BUILT_IN_STRCSPN:
              case BUILT_IN_STRLEN:
              case BUILT_IN_STRNCAT:
              case BUILT_IN_STRNCMP:
              case BUILT_IN_STRNCPY:
              case BUILT_IN_STRPBRK:
              case BUILT_IN_STRRCHR:
              case BUILT_IN_STRSPN:
              case BUILT_IN_STRSTR:
                /* Assume certain string/memory builtins always return.  */
                return false;
              default:
                break;
              }
        }
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
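/* Editor's illustration (not from the original sources): a call to memcpy
   hits the switch above and the function reports that the call will
   return, while a call to exit, longjmp or an unknown external function
   falls through to the final return and is treated as possibly not
   returning.  */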
/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx insn)
{
  rtx prev, x;

  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
  prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
        return true;
    }

  return false;
}
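/* Editor's illustration (not from the original sources): for

     insn 1:  (clobber (reg:DI R))
     insn 2:  (set (subreg:SI (reg:DI R) 0) ...)

   set_of (R, insn 2) is nonnull, so insn 2 is chained to insn 1 and the
   liveness-tracking clobber cannot drift away from the partial set.  */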
/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL_RTX;
      if (reload_completed
          && (current_sched_info->flags & DO_PREDICATION)
          && COMPARISON_P (t)
          && REG_P (XEXP (t, 0))
          && CONSTANT_P (XEXP (t, 1)))
        {
          unsigned int regno;
          int nregs;
          t = XEXP (t, 0);
          regno = REGNO (t);
          nregs = hard_regno_nregs[regno][GET_MODE (t)];
          t = NULL_RTX;
          while (nregs-- > 0)
            {
              struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
              t = concat_INSN_LIST (reg_last->sets, t);
              t = concat_INSN_LIST (reg_last->clobbers, t);
              t = concat_INSN_LIST (reg_last->implicit_sets, t);
            }
          INSN_COND_DEPS (insn) = t;
        }
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
            deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_jump, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
            /* Other call-clobbered hard regs may be clobbered.
               Since we only have a choice between 'might be clobbered'
               and 'definitely not clobbered', we must include all
               partly call-clobbered registers here.  */
            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
            /* We don't know what set of fixed registers might be used
               by the function, but it is certain that the stack pointer
               is among them; be conservative.  */
            else if (fixed_regs[i])
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
            /* The frame pointer is normally not used by the function
               itself, but by the debugger.  */
            /* ??? MIPS o32 is an exception.  It uses the frame pointer
               in the macro expansion of jal but does not represent this
               fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
         convention that sched group insns have dependencies only on the
         previous instruction.

         Of course one can say: "Hey!  What about head of the sched group?"
         And I will answer: "Basic principles (one dep per insn) are always
         the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          if (call_may_noreturn_p (insn))
            {
              /* Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  */
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
              deps->last_function_call_may_noreturn
                = alloc_INSN_LIST (insn, NULL_RTX);
            }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}
/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
{
  rtx insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          /* And initialize deps_lists.  */
          sd_init_insn (insn);
          /* Clean up SCHED_GROUP_P which may be set by last
             scheduler pass.  */
          if (SCHED_GROUP_P (insn))
            SCHED_GROUP_P (insn) = 0;
        }

      deps_analyze_insn (deps, insn);

      if (insn == tail)
        {
          if (sched_deps_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  gcc_unreachable ();
}
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx head, rtx tail, bool resolved_p)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  */
        if (resolved_p)
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
        else
          clear_deps_list (INSN_FORW_DEPS (insn));
      }

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear resolved back deps together with its dep_nodes.  */
        delete_dep_nodes_in_back_deps (insn, resolved_p);

        sd_finish_insn (insn);
      }
}
/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER
                 : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}
/* Init only reg_last field of DEPS, which was not allocated before as
   we inited DEPS lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}
/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
        free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
        free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;
}
/* Remove INSN from dependence contexts DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
        remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
        remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
          && !reg_last->clobbers)
        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
                                   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
                                   /* Allocate lists for one block at a time.  */
                                   insns_in_block);
      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
                                   /* Allocate nodes for one block at a time.
                                      We assume that average insn has
                                      5 producers.  */
                                   5 * insns_in_block);
    }
}
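/* Editor's note (not from the original sources): "100 * 5" means the
   per-luid dependency caches are only created once a basic block averages
   more than 500 insns; below that, the memory cost of the bitmaps
   outweighs the lookup savings.  */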
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
                                             luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
          bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
        }
      cache_size = luid;
    }
}
/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  free_alloc_pool_if_empty (&dn_pool);
  free_alloc_pool_if_empty (&dl_pool);
  gcc_assert (dn_pool == NULL && dl_pool == NULL);

  h_d_i_d.release ();

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
          bitmap_clear (&control_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }
    }

  cache_size = 0;
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}
/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
          && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
           || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative,
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
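/* Editor's worked example (not from the original sources): when exactly one
   of the two addresses is a REG, the value returned above lies halfway
   between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, i.e. the dependence is
   considered less likely than "uncertain" but not ruled out.  */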
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}
/* Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}
/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return ds;
}
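/* Editor's worked example (not from the original sources): the weakness is
   a bit-field of DS selected by TYPE, so for any weakness W in
   [MIN_DEP_WEAK, MAX_DEP_WEAK] the round trip

     get_dep_weak (set_dep_weak (ds, BEGIN_DATA, W), BEGIN_DATA) == W

   holds, and the non-BEGIN_DATA bits of DS are left untouched.  */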
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}
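/* Editor's worked example (not from the original sources): weaknesses act
   as fixed-point probabilities scaled by MAX_DEP_WEAK.  If both statuses
   carry BEGIN_DATA with weaknesses DW1 and DW2, ds_merge stores
   DW1 * DW2 / MAX_DEP_WEAK (clamped below at MIN_DEP_WEAK), the product
   of the two success probabilities in the same fixed-point scale.  */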
/* Return the join of two dep_statuses DS and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merging probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}
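
/* Note that when MEM1 is non-null the BEGIN_DATA weakness of DS is first
   re-estimated from the pair of addresses via estimate_dep_weak, so that
   the merged status reflects the actual memory references being joined.  */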
/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}
/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
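
/* E.g. a status that is both data and control speculative with weaknesses
   DW1 and DW2 yields DW1 * DW2 / MAX_DEP_WEAK, i.e. the product of the
   individual success probabilities scaled back into the
   [MIN_DEP_WEAK, MAX_DEP_WEAK] range.  */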
/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}
/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}
/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
		&& (ds & DEP_CONTROL)
		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	/* Subsequent speculations should resolve true dependencies.  */
	gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx inc_insn;
  rtx mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from
     mem_reg0 if the increment occurs before the memory address.  */
  rtx inc_input;
};
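
/* For example, for the insn pair

       (set ... (mem (plus (reg 1) (const_int 8)))) ; memory reference
       (set (reg 1) (plus (reg 1) (const_int 4)))   ; increment after it

   find_mem and parse_add_or_inc below fill in mem_reg0 = (reg 1),
   mem_index = NULL_RTX, mem_constant = 8, inc_input = (reg 1) and
   inc_constant = -4 (negated, since the increment follows the memory
   reference).  */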
/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */
static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
	fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}
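
/* Note that the validated change is immediately undone: at this point we
   only want to know that the replacement is legitimate.  The new memory
   reference is recorded in a dep_replacement structure by find_inc below,
   and the pattern is modified for real only if the scheduler later decides
   to break the dependence.  */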
/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */
static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
#ifdef STACK_GROWS_DOWNWARD
      return mii->inc_constant > 0;
#else
      return mii->inc_constant < 0;
#endif
    }
  return true;
}
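
/* In other words, despite the "+|-" wording in the comment above, the only
   shape accepted here is

       (set (reg A) (plus (reg B) (const_int N)))

   with decrements expressed by a negative N.  A must always match
   mem_reg0; B may differ from it only when the increment precedes the
   memory reference.  */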
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */
static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx pro = DEP_PRO (dep);
      rtx con = DEP_CON (dep);
      rtx inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* Need to assure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
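
/* For instance, given the backwards pair

       (set (reg 1) (plus (reg 2) (const_int 4)))   ; increment
       (set ... (mem (plus (reg 1) (const_int 8)))) ; memory reference

   the replacement address is built from the increment's input:
   (reg 2) + (8 + 4).  The memory reference then no longer uses (reg 1),
   so the two insns may change places.  */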
/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    /* If REG occurs inside a MEM used in a bit-field reference,
       that is unacceptable.  */
    return false;

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}
/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */

void
find_modifiable_mems (rtx head, rtx tail)
{
  rtx insn, next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
	continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
	success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
	     success_in_block);
}

#endif /* INSN_SCHEDULING */