/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;
/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}
/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  if (dk == REG_DEP_TRUE)
    return DEP_TRUE;

  if (dk == REG_DEP_OUTPUT)
    return DEP_OUTPUT;

  gcc_assert (dk == REG_DEP_ANTI);

  return DEP_ANTI;
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = -1;

  init_dep_1 (dep, pro, con, kind, ds);
}
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
static void dump_ds (FILE *, ds_t);
/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)
/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
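
/* Illustrative note (an editorial sketch, not part of the original file):
   keeping the address of the previous node's "next" field in
   DEP_LINK_PREV_NEXTP makes unlinking O(1) without a real "prev" pointer,
   whether or not the link is first in its list, e.g.

     dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
     *prev_nextp = DEP_LINK_NEXT (l);   /* Unlink L.  */

   which is exactly the pattern detach_dep_link () below uses.  */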
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}
/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;
313 /* Create a dep_node. */
315 create_dep_node (void)
317 dep_node_t n
= (dep_node_t
) pool_alloc (dn_pool
);
318 dep_link_t back
= DEP_NODE_BACK (n
);
319 dep_link_t forw
= DEP_NODE_FORW (n
);
321 DEP_LINK_NODE (back
) = n
;
322 DEP_LINK_NEXT (back
) = NULL
;
323 DEP_LINK_PREV_NEXTP (back
) = NULL
;
325 DEP_LINK_NODE (forw
) = n
;
326 DEP_LINK_NEXT (forw
) = NULL
;
327 DEP_LINK_PREV_NEXTP (forw
) = NULL
;
/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}
/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;
/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}
/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}
/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}
/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;
/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that names a single register class.  Even
   if there is currently no hard register in the insn, the particular
   hard register will be in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
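
/* Illustrative sketch (an editorial note, not part of the original file):
   with these caches, testing whether a dependence PRO -> CON is already
   known costs a pair of O(1) bitmap probes indexed by luid, e.g.

     int elem_luid = INSN_LUID (pro);
     int insn_luid = INSN_LUID (con);

     if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
       ;  /* A true dependence is already recorded.  */

   See ask_dependency_caches () and sd_find_dep_between () below for the
   real lookups.  */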
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps *, rtx,
					  rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
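
/* For example (illustrative only), for a conditionally executed insn

     (cond_exec (ne (reg:CC 17) (const_int 0)) (set (reg:SI 0) ...))

   sched_get_condition_with_rev returns the (ne ...) test with *REV left
   false.  For a conditional jump whose label sits in the "else" arm
   (i.e. the "then" arm is pc), the condition is returned with *REV set
   to true, meaning it must be reversed to get the actual condition.  */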
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
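
/* For instance (illustrative), (eq (reg 1) (const_int 0)) and
   (ne (reg 1) (const_int 0)) with REV1 == REV2 are mutually exclusive:
   GET_CODE (cond1) equals the reversed code of cond2 and both operand
   pairs match, so conditions_mutex_p returns nonzero.  */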
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }

  return false;
}
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_p (PATTERN (insn)))
	/* If instruction might trap, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
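
/* Illustrative usage (an assumed caller pattern, not code from this
   file): sd_next_list is what lets one iterator walk several lists in a
   row, as in

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       fprintf (stderr, "back dep on %d\n", INSN_UID (DEP_PRO (dep)));

   where SD_LIST_BACK covers both the hard and the speculative backward
   lists.  */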
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);

      size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  if (DEBUG_INSN_P (insn))
    DEBUG_INSN_SCHED_P (insn) = TRUE;

  /* ??? It would be nice to allocate dependency caches here.  */
}
/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  if (DEBUG_INSN_P (insn))
    {
      gcc_assert (DEBUG_INSN_SCHED_P (insn));
      DEBUG_INSN_SCHED_P (insn) = FALSE;
    }

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check if the dependency is
   present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency or NULL if none found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
		  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
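
/* Illustrative usage (an editorial sketch; producer/consumer are
   hypothetical insn variables, not names from this file):

     dep_t dep = sd_find_dep_between (producer, consumer, false);

     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ;  /* A true dependence exists between the two insns.  */

   When the caches are live, a negative answer costs only the three
   bitmap probes above and skips the list walk entirely.  */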
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* Type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	/* Either existing dep or a dep we're adding or both are
	   speculative.  */
	{
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    {
	      new_status &= ~SPECULATIVE;

	      if (dep_status & SPECULATIVE)
		/* The old dep was speculative, but now it
		   isn't.  */
		change_spec_dep_to_hard (sd_it);
	    }
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if ((current_sched_info->flags & DO_SPECULATION)
	  && (DEP_STATUS (dep) & SPECULATIVE))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
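
/* Illustrative sketch (an assumed caller pattern): a dependence is
   normally built on the stack, initialized and then registered, e.g.

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   as done by sd_copy_back_deps () and haifa_note_dep () in this file
   (producer/consumer are hypothetical insn variables).  */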
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}
/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */

static void
add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
			      int uncond, enum reg_note dep_type)
{
  rtx list, next;

  if (deps->readonly)
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx *listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
    }

  return removed;
}
/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  while (DEBUG_INSN_P (prev_nonnote))
    prev_nonnote = prev_nonnote_insn (prev_nonnote);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
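
/* For instance (an illustrative sketch, not code from this file), in

     insn 1:  r2 = [mem]     ; read
     insn 2:  [mem] = r3     ; write
     insn 3:  r4 = [mem]     ; read

   insn 2 has an anti dependence on insn 1 (write follows read), insn 3
   has a true dependence on insn 2 (read follows write), and a second
   store to [mem] would have an output dependence on insn 2.  */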
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps *deps, bool read_p,
			 rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
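
/* Illustrative note (editorial): a flush conservatively serializes the
   insn against every pending access and then records the insn itself as
   the last memory flush, so a later memory access only needs, e.g.

     add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			  REG_DEP_ANTI);

   instead of walking the (now freed) pending lists; see the MEM cases
   in sched_analyze_1 () and sched_analyze_2 () below.  */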
/* Instruction whose dependencies we are analyzing.  */
static rtx cur_insn = NULL_RTX;
/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}
void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}
static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}
static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else
    {
      gcc_assert (ds & DEP_ANTI);
      return REG_DEP_ANTI;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}
1752 static struct reg_set_data
*
1753 create_insn_reg_set (int regno
, rtx insn
)
1755 struct reg_set_data
*set
;
1757 set
= (struct reg_set_data
*) xmalloc (sizeof (struct reg_set_data
));
1760 set
->next_insn_set
= INSN_REG_SET_LIST (insn
);
1761 INSN_REG_SET_LIST (insn
) = set
;
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
	{
	  use2 = create_insn_reg_use (i, XEXP (list, 0));
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
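
/* Illustrative note (editorial): the next_regno_use links built above
   form a cycle through all uses of the same regno, so starting from any
   use one can visit the others with a sketch like

     struct reg_use_data *u = use;
     do
       {
	 /* ... process u->insn ...  */
	 u = u->next_regno_use;
       }
     while (u != use);

   (assumed traversal pattern, not code from this file).  */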
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, correspondingly,
   whether the register is clobbered or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_regno_birth except that NREGS says how many
   hard registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_cover_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
   correspondingly, whether the register is clobbered or unused after
   the insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
				hard_regno_nregs[regno][GET_MODE (reg)],
				clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}
/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_cover_class[regno];
	  if (cl != NO_REGS)
	    reg_pressure_info[cl].change -= 1;
	}
      regno++;
    }
}
/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}
/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}
/* Set up reg pressure info related to INSN.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
						  * sizeof (int), 1);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
/* Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  */
static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (struct deps *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
				   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
						    new_reg_info_p_size,
						    reg_info_p_size,
						    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  int nregs;

	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
	  while (--nregs >= 0)
	    SET_HARD_REG_BIT (implicit_reg_pending_uses,
			      FIRST_STACK_REG + nregs);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  enum machine_mode address_mode
	    = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));

	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), address_mode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
	  && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      > MAX_PENDING_LIST_LENGTH))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
			      DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);

	  if (!deps->readonly)
	    add_insn_mem_dependence (deps, false, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
2359 /* Analyze the uses of memory and registers in rtx X in INSN. */
2361 sched_analyze_2 (struct deps
*deps
, rtx x
, rtx insn
)
2367 bool cslr_p
= can_start_lhs_rhs_p
;
2369 can_start_lhs_rhs_p
= false;
2375 if (cslr_p
&& sched_deps_info
->start_rhs
)
2376 sched_deps_info
->start_rhs (x
);
2378 code
= GET_CODE (x
);
2389 /* Ignore constants. */
2390 if (cslr_p
&& sched_deps_info
->finish_rhs
)
2391 sched_deps_info
->finish_rhs ();
#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
	 same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;
#endif
    case REG:
      {
	int regno = REGNO (x);
	enum machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
	/* Treat all reads of a stack register as modifying the TOS.  */
	if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	  {
	    /* Avoid analyzing the same register twice.  */
	    if (regno != FIRST_STACK_REG)
	      sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	  }
#endif

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }
    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (sched_deps_info->use_cselib)
	  {
	    enum machine_mode address_mode
	      = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));

	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), address_mode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }

	if (!DEBUG_INSN_P (insn))
	  {
	    t = canon_rtx (t);
	    pending = deps->pending_read_insns;
	    pending_mem = deps->pending_read_mems;
	    while (pending)
	      {
		if (read_dependence (XEXP (pending_mem, 0), t)
		    && ! sched_insns_conditions_mutex_p (insn,
							 XEXP (pending, 0)))
		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
				DEP_ANTI);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }

	    pending = deps->pending_write_insns;
	    pending_mem = deps->pending_write_mems;
	    while (pending)
	      {
		if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				     t, rtx_varies_p)
		    && ! sched_insns_conditions_mutex_p (insn,
							 XEXP (pending, 0)))
		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
				sched_deps_info->generate_spec_deps
				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

		pending = XEXP (pending, 1);
		pending_mem = XEXP (pending_mem, 1);
	      }

	    for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	      {
		if (! JUMP_P (XEXP (u, 0)))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		else if (deps_may_trap_p (x))
		  {
		    if ((sched_deps_info->generate_spec_deps)
			&& sel_sched_p ()
			&& (spec_info->mask & BEGIN_CONTROL))
		      {
			ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
						MAX_DEP_WEAK);

			note_dep (XEXP (u, 0), ds);
		      }
		    else
		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		  }
	      }
	  }

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	if (!deps->readonly)
	  add_insn_mem_dependence (deps, true, insn, x);

	sched_analyze_2 (deps, XEXP (x, 0), insn);

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }
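
    /* Illustrative sketch, not part of the pass: for

	 insn 1:  [r1] = r2      ; recorded in pending_write_insns/mems
	 insn 2:  r3 = [r4]      ; the read analyzed above

       true_dependence () decides whether [r4] may overlap [r1].  When the
       target generates speculative deps, the dependence is tagged
       BEGIN_DATA | DEP_TRUE, which lets insn 2 be hoisted above insn 1
       provided a speculation check is emitted.  The register numbers are
       invented for the example.  */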
    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
	reg_pending_barrier = TRUE_BARRIER;
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */
    case ASM_OPERANDS:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = TRUE_BARRIER;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We cannot just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	   a traditional asm unlike its normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

	    if (cslr_p && sched_deps_info->finish_rhs)
	      sched_deps_info->finish_rhs ();

	    return;
	  }
	break;
      }
    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }
  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}
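
/* Illustrative sketch, not part of the pass: the default walk follows
   GET_RTX_FORMAT.  E.g. for x = (plus:SI (reg:SI 58) (mem:SI (reg:SI 59)))
   the format string is "ee", so both operands are analyzed recursively:
   the REG case records a use of pseudo 58, and the MEM case records a
   memory read through pseudo 59.  The pseudo numbers are invented.  */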
/* Analyze an INSN with pattern X to find all dependencies.  */
static void
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (! reload_completed)
    {
      HARD_REG_SET temp;

      extract_insn (insn);
      preprocess_constraints ();
      ira_implicitly_set_insn_hard_regs (&temp);
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
    }

  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
			 && code == SET);

  if (may_trap_p (x))
    /* Avoid moving trapping instructions across function calls that might
       not always return.  */
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
			 1, REG_DEP_ANTI);

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
	 and others know that a value is dead.  Depend on the last call
	 instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
	add_dependence_list (insn, deps->last_function_call, 1,
			     REG_DEP_ANTI);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	reg_pending_barrier = MOVE_BARRIER;
    }

  if (JUMP_P (insn))
    {
      rtx next;
      next = next_nonnote_insn (insn);
      while (next && DEBUG_INSN_P (next))
	next = next_nonnote_insn (next);
      if (next && BARRIER_P (next))
	reg_pending_barrier = MOVE_BARRIER;
      else
	{
	  rtx pending, pending_mem;

	  if (sched_deps_info->compute_jump_reg_dependencies)
	    {
	      regset_head tmp_uses, tmp_sets;

	      INIT_REG_SET (&tmp_uses);
	      INIT_REG_SET (&tmp_sets);

	      (*sched_deps_info->compute_jump_reg_dependencies)
		(insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
	      /* Make latency of jump equal to 0 by using anti-dependence.  */
	      EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
		{
		  struct deps_reg *reg_last = &deps->reg_last[i];
		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
		  add_dependence_list (insn, reg_last->implicit_sets, 0,
				       REG_DEP_ANTI);
		  add_dependence_list (insn, reg_last->clobbers, 0,
				       REG_DEP_ANTI);

		  if (!deps->readonly)
		    {
		      reg_last->uses_length++;
		      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
		    }
		}
	      IOR_REG_SET (reg_pending_sets, &tmp_sets);

	      CLEAR_REG_SET (&tmp_uses);
	      CLEAR_REG_SET (&tmp_sets);
	    }

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the above register used mask.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);
	}
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  if (sched_pressure_p)
    {
      setup_insn_reg_uses (deps, insn);
      setup_insn_reg_pressure_info (insn);
    }
  /* Add register dependencies for insn.  */
  if (DEBUG_INSN_P (insn))
    {
      rtx prev = deps->last_debug_insn;
      rtx u;

      if (!deps->readonly)
	deps->last_debug_insn = insn;

      if (prev)
	add_dependence (insn, prev, REG_DEP_ANTI);

      add_dependence_list (insn, deps->last_function_call, 1,
			   REG_DEP_ANTI);

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	if (! JUMP_P (XEXP (u, 0))
	    || !sel_sched_p ())
	  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);

	  if (!deps->readonly)
	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	}
      CLEAR_REG_SET (reg_pending_uses);

      /* Quite often, a debug insn will refer to stuff in the
	 previous instruction, but the reason we want this
	 dependency here is to make sure the scheduler doesn't
	 gratuitously move a debug insn ahead.  This could dirty
	 DF flags and cause additional analysis that wouldn't have
	 occurred in compilation without debug insns, and such
	 additional analysis can modify the generated code.  */
      prev = PREV_INSN (insn);

      if (prev && NONDEBUG_INSN_P (prev))
	add_dependence (insn, prev, REG_DEP_ANTI);
    }
  else
    {
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);

	  if (!deps->readonly)
	    {
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      reg_last->uses_length++;
	    }
	}

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
	  {
	    struct deps_reg *reg_last = &deps->reg_last[i];
	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
	    add_dependence_list (insn, reg_last->implicit_sets, 0,
				 REG_DEP_ANTI);
	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);

	    if (!deps->readonly)
	      {
		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
		reg_last->uses_length++;
	      }
	  }

      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (sched_has_condition_p (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->implicit_sets, 0,
				   REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);

	      if (!deps->readonly)
		{
		  reg_last->clobbers
		    = alloc_INSN_LIST (insn, reg_last->clobbers);
		  reg_last->clobbers_length++;
		}
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->implicit_sets, 0,
				   REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);

	      if (!deps->readonly)
		{
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
		}
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
						REG_DEP_OUTPUT);
		  add_dependence_list_and_free (deps, insn,
						&reg_last->implicit_sets, 0,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
						REG_DEP_ANTI);
		  add_dependence_list_and_free
		    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);

		  if (!deps->readonly)
		    {
		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		      reg_last->clobbers_length = 0;
		      reg_last->uses_length = 0;
		    }
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, 0,
				       REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->implicit_sets, 0,
				       REG_DEP_ANTI);
		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
		}

	      if (!deps->readonly)
		{
		  reg_last->clobbers_length++;
		  reg_last->clobbers
		    = alloc_INSN_LIST (insn, reg_last->clobbers);
		}
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];

	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets, 0,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI);

	      if (!deps->readonly)
		{
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->uses_length = 0;
		  reg_last->clobbers_length = 0;
		  CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
		}
	    }
	}

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
	  {
	    struct deps_reg *reg_last = &deps->reg_last[i];
	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
	    add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);

	    if (!deps->readonly)
	      reg_last->implicit_sets
		= alloc_INSN_LIST (insn, reg_last->implicit_sets);
	  }

      if (!deps->readonly)
	{
	  IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
	  IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
	  IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
		|| TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
	      SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

	  /* Set up the pending barrier found.  */
	  deps->last_reg_pending_barrier = reg_pending_barrier;
	}

      CLEAR_REG_SET (reg_pending_uses);
      CLEAR_REG_SET (reg_pending_clobbers);
      CLEAR_REG_SET (reg_pending_sets);
      CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
      CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
    }
  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->sets, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->implicit_sets, 0,
				   REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, 0,
				   reg_pending_barrier == TRUE_BARRIER
				   ? REG_DEP_TRUE : REG_DEP_ANTI);
	    }
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI);
	      add_dependence_list_and_free (deps, insn,
					    &reg_last->implicit_sets, 0,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
					    reg_pending_barrier == TRUE_BARRIER
					    ? REG_DEP_TRUE : REG_DEP_ANTI);

	      if (!deps->readonly)
		{
		  reg_last->uses_length = 0;
		  reg_last->clobbers_length = 0;
		}
	    }
	}

      if (!deps->readonly)
	for (i = 0; i < (unsigned) deps->max_reg; i++)
	  {
	    struct deps_reg *reg_last = &deps->reg_last[i];
	    reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	  }

      /* Flush pending lists on jumps, but not on speculative checks.  */
      if (JUMP_P (insn) && !(sel_sched_p ()
			     && sel_insn_is_speculation_check (insn)))
	flush_pending_lists (deps, insn, true, true);

      if (!deps->readonly)
	CLEAR_REG_SET (&deps->reg_conditional_sets);
      reg_pending_barrier = NOT_A_BARRIER;
    }
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	{
	  if (DEBUG_INSN_P (insn))
	    /* We don't want to mark debug insns as part of the same
	       sched group.  We know they really aren't, but if we use
	       debug insns to tell that a call group is over, we'll
	       get different code if debug insns are not there and
	       instructions that follow seem like they should be part
	       of the call group.

	       Also, if we did, fixup_sched_groups () would move the
	       deps of the debug insn to the call insn, modifying
	       non-debug post-dependency counts of the debug insn
	       dependencies and otherwise messing with the scheduling
	       order.

	       Instead, let such debug insns be scheduled freely, but
	       keep the call group open in case there are insns that
	       should be part of it afterwards.  Since we grant debug
	       insns higher priority than even sched group insns, it
	       will all turn out all right.  */
	    goto debug_dont_end_call_group;
	  else
	    goto end_call_group;
	}

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (!deps->readonly
	      && deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

	  if (!sel_sched_p () || sched_emulate_haifa_p)
	    {
	      SCHED_GROUP_P (insn) = 1;
	      CANT_MOVE (insn) = 1;
	    }
	}
      else
	{
	end_call_group:
	  if (!deps->readonly)
	    deps->in_post_call_group_p = not_post_call;
	}
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
	sel_mark_hard_insn (insn);
      else
	{
	  sd_iterator_def sd_it;
	  dep_t dep;

	  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	       sd_iterator_cond (&sd_it, &dep);)
	    change_spec_dep_to_hard (sd_it);
	}
    }
}
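
/* Illustrative sketch, not part of the pass: before reload a post-call
   group typically looks like

     call foo
     r120 = r0          ; copy the hard return-value reg into a pseudo

   The copy is marked SCHED_GROUP_P / CANT_MOVE above so it stays glued
   to the call.  The register numbers are invented for the example.  */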
/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
static bool
call_may_noreturn_p (rtx insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = PATTERN (insn);
  if (GET_CODE (call) == PARALLEL)
    call = XVECEXP (call, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);
  if (GET_CODE (call) == CALL
      && MEM_P (XEXP (call, 0))
      && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
	{
	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
	      == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
	      {
	      case BUILT_IN_BCMP:
	      case BUILT_IN_BCOPY:
	      case BUILT_IN_BZERO:
	      case BUILT_IN_INDEX:
	      case BUILT_IN_MEMCHR:
	      case BUILT_IN_MEMCMP:
	      case BUILT_IN_MEMCPY:
	      case BUILT_IN_MEMMOVE:
	      case BUILT_IN_MEMPCPY:
	      case BUILT_IN_MEMSET:
	      case BUILT_IN_RINDEX:
	      case BUILT_IN_STPCPY:
	      case BUILT_IN_STPNCPY:
	      case BUILT_IN_STRCAT:
	      case BUILT_IN_STRCHR:
	      case BUILT_IN_STRCMP:
	      case BUILT_IN_STRCPY:
	      case BUILT_IN_STRCSPN:
	      case BUILT_IN_STRLEN:
	      case BUILT_IN_STRNCAT:
	      case BUILT_IN_STRNCMP:
	      case BUILT_IN_STRNCPY:
	      case BUILT_IN_STRPBRK:
	      case BUILT_IN_STRRCHR:
	      case BUILT_IN_STRSPN:
	      case BUILT_IN_STRSTR:
		/* Assume certain string/memory builtins always return.  */
		return false;
	      default:
		break;
	      }
	}
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps *deps, rtx insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
	 a scheduling barrier for memory references.  */
      if (!deps->readonly
	  && JUMP_P (insn)
	  && !(sel_sched_p ()
	       && sel_insn_is_speculation_check (insn)))
	{
	  /* Keep the list a reasonable size.  */
	  if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
	    flush_pending_lists (deps, insn, true, true);
	  else
	    deps->last_pending_memory_flush
	      = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	}

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
	{
	  /* This is setjmp.  Assume that all registers, not just
	     hard registers, may be clobbered by this call.  */
	  reg_pending_barrier = MOVE_BARRIER;
	}
      else
	{
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    /* A call may read and modify global register variables.  */
	    if (global_regs[i])
	      {
		SET_REGNO_REG_SET (reg_pending_sets, i);
		SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	      }
	    /* Other call-clobbered hard regs may be clobbered.
	       Since we only have a choice between 'might be clobbered'
	       and 'definitely not clobbered', we must include all
	       partly call-clobbered registers here.  */
	    else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
		     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
	      SET_REGNO_REG_SET (reg_pending_clobbers, i);
	    /* We don't know what set of fixed registers might be used
	       by the function, but it is certain that the stack pointer
	       is among them, so be conservative.  */
	    else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	    /* The frame pointer is normally not used by the function
	       itself, but by the debugger.  */
	    /* ??? MIPS o32 is an exception.  It uses the frame pointer
	       in the macro expansion of jal but does not represent this
	       fact in the call_insn rtl.  */
	    else if (i == FRAME_POINTER_REGNUM
		     || (i == HARD_FRAME_POINTER_REGNUM
			 && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	}

      /* For each insn which shouldn't cross a call, add a dependence
	 between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_call, 1,
				    REG_DEP_ANTI);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
	 all pending reads and writes, and start new dependencies starting
	 from here.  But only flush writes for constant calls (which may
	 be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true,
			   ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
	{
	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  /* EH_REGION insn notes cannot appear until well after we complete
     scheduling.  */
  if (NOTE_P (insn))
    gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
		&& NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && SCHED_GROUP_P (insn) && !sel_sched_p ())
    fixup_sched_groups (insn);
}
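
/* Note on the setjmp case above: a REG_SETJMP call is a MOVE_BARRIER
   because the call may return a second time via longjmp, so even values
   living purely in pseudo registers must be computed before it and must
   not be moved across it.  */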
/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps *deps, rtx head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx insn = prev_nonnote_insn (head);

      while (insn && DEBUG_INSN_P (insn))
	insn = prev_nonnote_insn (insn);
      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps *deps, rtx head, rtx tail)
{
  rtx insn;

  if (sched_deps_info->use_cselib)
    cselib_init (true);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* And initialize deps_lists.  */
	  sd_init_insn (insn);
	}

      deps_analyze_insn (deps, insn);

      if (insn == tail)
	{
	  if (sched_deps_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}
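
/* Illustrative usage sketch (assumed driver, not part of this file):
   a scheduler typically processes a region roughly as

     struct deps deps;

     init_deps (&deps, false);
     sched_analyze (&deps, head, tail);   -- build backward dependencies
     ...schedule the insns between HEAD and TAIL...
     sched_free_deps (head, tail, true);  -- drop resolved dependencies
     free_deps (&deps);

   mirroring what the Haifa region scheduler does.  */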
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx head, rtx tail, bool resolved_p)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with their dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));

	sd_finish_insn (insn);
      }
}
/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps immediately.  */
void
init_deps (struct deps *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}
/* Init only the reg_last field of DEPS, which was not allocated before
   because we inited DEPS lazily.  */
void
init_deps_reg_last (struct deps *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}
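
/* Usage note: a caller that creates many contexts up front can pass
   LAZY_REG_LAST to init_deps () and invoke init_deps_reg_last () only
   for contexts that actually get analyzed, postponing the max_reg-sized
   allocation.  (The selective scheduler is the intended user of this
   lazy path; the caller list is an assumption.)  */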
/* Free insn lists found in DEPS.  */
void
free_deps (struct deps *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
	free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  if (deps->reg_last)
    free (deps->reg_last);
  deps->reg_last = NULL;
}
/* Remove INSN from dependence contexts DEPS.  Caution: reg_conditional_sets
   is not updated.  */
void
remove_from_deps (struct deps *deps, rtx insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
					       &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn,
					       &deps->pending_write_insns,
					       &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;
  removed = remove_from_dependence_list (insn,
					 &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1
		 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
    VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
			   3 * sched_max_luid / 2);
}
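
/* Growing to 3 * sched_max_luid / 2 instead of just the requested
   reserve makes repeated luid extensions reallocate geometrically
   rather than once per new insn.  */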
/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
	 which could reduce both the number of vectors in the cache and the
	 size of each vector.  Instead we just avoid the cache entirely unless
	 the average number of instructions in a basic block is very high.  See
	 the comment before the declaration of true_dependency_cache for
	 what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
				   /* Allocate lists for one block at a time.  */
				   insns_in_block);
      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
				   /* Allocate nodes for one block at a time.
				      We assume that average insn has
				      5 deps.  */
				   5 * insns_in_block);
    }
}
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);

      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
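
/* Illustrative sketch, not part of the pass: each cache is an array of
   bitmaps indexed by insn luid, so a cached dependence test is roughly

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (consumer)],
		   INSN_LUID (producer))

   (the exact indexing is an assumption here).  This only pays off when
   basic blocks are large; see the threshold in sched_deps_init ().  */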
/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  free_alloc_pool_if_empty (&dn_pool);
  free_alloc_pool_if_empty (&dl_pool);
  gcc_assert (dn_pool == NULL && dl_pool == NULL);

  VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);

	  if (sched_deps_info->generate_spec_deps)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }
  cache_size = 0;
}
3732 init_deps_global (void)
3734 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers
);
3735 CLEAR_HARD_REG_SET (implicit_reg_pending_uses
);
3736 reg_pending_sets
= ALLOC_REG_SET (®_obstack
);
3737 reg_pending_clobbers
= ALLOC_REG_SET (®_obstack
);
3738 reg_pending_uses
= ALLOC_REG_SET (®_obstack
);
3739 reg_pending_barrier
= NOT_A_BARRIER
;
3741 if (!sel_sched_p () || sched_emulate_haifa_p
)
3743 sched_deps_info
->start_insn
= haifa_start_insn
;
3744 sched_deps_info
->finish_insn
= haifa_finish_insn
;
3746 sched_deps_info
->note_reg_set
= haifa_note_reg_set
;
3747 sched_deps_info
->note_reg_clobber
= haifa_note_reg_clobber
;
3748 sched_deps_info
->note_reg_use
= haifa_note_reg_use
;
3750 sched_deps_info
->note_mem_dep
= haifa_note_mem_dep
;
3751 sched_deps_info
->note_dep
= haifa_note_dep
;
/* Free everything used by the dependency analysis code.  */
void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}
/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
	  && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
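
/* Illustrative numbers: a weakness is the estimated probability that
   the two accesses do NOT conflict (i.e. that data speculation would
   succeed), scaled to [MIN_DEP_WEAK, MAX_DEP_WEAK].  Identical
   addresses yield MIN_DEP_WEAK (certain conflict - never speculate);
   mismatched addressing modes land halfway between UNCERTAIN_DEP_WEAK
   and NO_DEP_WEAK, i.e. better odds than a completely unknown pair.  */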
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}
/* Return weakness of speculative type TYPE in the dep_status DS,
   without range checking.  */
dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

/* Likewise, but check that the weakness lies in the allowed range.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return ds;
}
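
/* Illustrative round trip, not part of the pass: the weakness of each
   speculative type lives in its own bit field of the ds_t word, so

     ds_t ds = set_dep_weak (DEP_TRUE | BEGIN_DATA, BEGIN_DATA,
			     MAX_DEP_WEAK / 2);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == MAX_DEP_WEAK / 2);

   holds: set_dep_weak () clears the BEGIN_DATA field and ors the new
   weakness in at BEGIN_DATA_BITS_OFFSET.  */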
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}
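
/* Illustrative arithmetic: weaknesses act as fixed-point probabilities
   scaled by MAX_DEP_WEAK.  If both statuses carry BEGIN_DATA with
   weakness MAX_DEP_WEAK / 2 (a 50% chance that speculation succeeds),
   the non-MAX_P merge above computes

     (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
	== MAX_DEP_WEAK / 4

   i.e. a 25% chance, clamped from below at MIN_DEP_WEAK.  */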
/* Return the join of two dep_statuses DS and DS2, estimating the data
   speculation weakness from MEM1 and MEM2 when possible.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merge probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}
/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}
/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "}");
}

void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
		&& (ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */