/* Instruction scheduling pass.  This file computes dependencies between
   instructions and propagates this information to other passes.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

static enum reg_pending_barrier_mode reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  Whenever the bitmap for the true-dependency cache
   is allocated, the other caches are allocated as well.  */
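/* Putting the two figures above together: the caches only pay off when a
   basic block averages more than 100 * 5 = 500 instructions, which is the
   threshold that init_dependency_caches below checks for.  */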
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
                               enum reg_note, ds_t, rtx, rtx, rtx **);
static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
                               enum reg_note, ds_t, rtx, rtx, rtx **);
static void add_back_dep (rtx, rtx, enum reg_note, ds_t);

static void adjust_add_sorted_back_dep (rtx, rtx, rtx *);
static void adjust_back_add_forw_dep (rtx, rtx *);
static void delete_forw_dep (rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
#endif
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
deps_may_trap_p (rtx mem)
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    rtx t = get_reg_known_value (REGNO (addr));

  return rtx_addr_can_trap_p (addr);
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */
find_insn_list (rtx insn, rtx list)
  if (XEXP (list, 0) == insn)
    return list;
  list = XEXP (list, 1);
/* Find the condition under which INSN is executed.  */
sched_get_condition (rtx insn)
  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
                             XEXP (cond, 1));
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
conditions_mutex_p (rtx cond1, rtx cond2)
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
      cond1 = sched_get_condition (insn1);
      cond2 = sched_get_condition (insn2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
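/* For example, (eq (reg) (const_int 0)) and (ne (reg) (const_int 0)) are
   mutually exclusive by the test in conditions_mutex_p above: one is the
   reversed comparison of the other and both operands match.  */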
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if it is not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  DS, if nonzero,
   indicates speculations, through which this dependence can be overcome.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.  The function returns a value indicating whether an old
   entry has been changed or a new entry has been added to insn's LOG_LINK.
   If an entry has been changed, *CHANGED_LINKPP is set to its address.
   See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
   Actual manipulation of dependence data structures is performed in
   add_or_update_back_dep_1.  */

static enum DEPS_ADJUST_RESULT
maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
                                ds_t ds, rtx mem1, rtx mem2,
                                rtx **changed_linkpp)
  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */

#ifdef INSN_SCHEDULING
      if (current_sched_info->flags & DO_SPECULATION)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

  return add_or_update_back_dep_1 (insn, elem, dep_type,
                                   ds, mem1, mem2, changed_linkpp);
/* This function has the same parameters and return values as
   maybe_add_or_update_back_dep_1.  The only difference between these
   two functions is that INSN and ELEM are guaranteed not to be the same
   here.  */
static enum DEPS_ADJUST_RESULT
add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
                          ds_t ds ATTRIBUTE_UNUSED,
                          rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
                          rtx **changed_linkpp ATTRIBUTE_UNUSED)
  bool maybe_present_p = true, present_p = false;

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, mem1 != NULL);

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
      enum reg_note present_dep_type;

      gcc_assert (output_dependency_cache);
      gcc_assert (anti_dependency_cache);
      if (!(current_sched_info->flags & USE_DEPS_LIST))
          if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_type = REG_DEP_TRUE;
          else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
                                 INSN_LUID (elem)))
            present_dep_type = REG_DEP_OUTPUT;
          else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
                                 INSN_LUID (elem)))
            present_dep_type = REG_DEP_ANTI;
            maybe_present_p = false;

          if ((int) dep_type >= (int) present_dep_type)
          ds_t present_dep_types = 0;

          if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_TRUE;
          if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_OUTPUT;
          if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem)))
            present_dep_types |= DEP_ANTI;

          if (present_dep_types)
              if (!(current_sched_info->flags & DO_SPECULATION)
                  || !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
                                    INSN_LUID (elem)))
                  if ((present_dep_types | (ds & DEP_TYPES))
                      == present_dep_types)
                    /* We already have all these bits.  */

                  /* Only true dependencies can be data speculative and
                     only anti dependencies can be control speculative.  */
                  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                              == present_dep_types);

                  /* if (additional dep is SPECULATIVE) then
                       we should update DEP_STATUS
                     else
                       we should reset existing dep to non-speculative.  */

            maybe_present_p = false;

  /* Check that we don't already have this dependence.  */
      for (linkp = &LOG_LINKS (insn); *linkp; linkp = &XEXP (*linkp, 1))
          gcc_assert (true_dependency_cache == 0 || present_p);

          if (XEXP (link, 0) == elem)
              enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;

#ifdef INSN_SCHEDULING
              if (current_sched_info->flags & USE_DEPS_LIST)
                  ds_t new_status = ds | DEP_STATUS (link);

                  if (new_status & SPECULATIVE)
                      if (!(ds & SPECULATIVE)
                          || !(DEP_STATUS (link) & SPECULATIVE))
                        /* Then this dep can't be speculative.  */
                          new_status &= ~SPECULATIVE;
                          if (true_dependency_cache
                              && (DEP_STATUS (link) & SPECULATIVE))
                            bitmap_clear_bit (&spec_dependency_cache
                                              [INSN_LUID (insn)],
                                              INSN_LUID (elem));

                          /* Both are speculative.  Merging probabilities.  */
                              dw = estimate_dep_weak (mem1, mem2);
                              ds = set_dep_weak (ds, BEGIN_DATA, dw);

                          new_status = ds_merge (DEP_STATUS (link), ds);

              /* Clear corresponding cache entry because type of the link
                 may have changed.  Keep them if we use_deps_list.  */
              if (true_dependency_cache != NULL
                  && !(current_sched_info->flags & USE_DEPS_LIST))
                  enum reg_note kind = REG_NOTE_KIND (link);

                      bitmap_clear_bit (&output_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      bitmap_clear_bit (&anti_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));

              if ((current_sched_info->flags & USE_DEPS_LIST)
                  && DEP_STATUS (link) != ds)
                  DEP_STATUS (link) = ds;
                  changed_p = DEP_CHANGED;

              /* If this is a more restrictive type of dependence than the
                 existing one, then change the existing dependence to this
                 type.  */
              if ((int) dep_type < (int) REG_NOTE_KIND (link))
                  PUT_REG_NOTE_KIND (link, dep_type);
                  changed_p = DEP_CHANGED;

#ifdef INSN_SCHEDULING
              /* If we are adding a dependency to INSN's LOG_LINKs, then
                 note that in the bitmap caches of dependency information.  */
              if (true_dependency_cache != NULL)
                  if (!(current_sched_info->flags & USE_DEPS_LIST))
                      if (REG_NOTE_KIND (link) == REG_DEP_TRUE)
                        bitmap_set_bit (&true_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                        bitmap_set_bit (&output_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                        bitmap_set_bit (&anti_dependency_cache
                                        [INSN_LUID (insn)], INSN_LUID (elem));
                      bitmap_set_bit (&true_dependency_cache
                                      [INSN_LUID (insn)], INSN_LUID (elem));
                      bitmap_set_bit (&output_dependency_cache
                                      [INSN_LUID (insn)], INSN_LUID (elem));
                      bitmap_set_bit (&anti_dependency_cache
                                      [INSN_LUID (insn)], INSN_LUID (elem));
                      /* Note, that dep can become speculative only
                         at the moment of creation.  Thus, we don't need to
                         check for it here.  */

              if (changed_linkpp && changed_p == DEP_CHANGED)
                *changed_linkpp = linkp;

  /* We didn't find a dep.  It shouldn't be present in the cache.  */
  gcc_assert (!present_p);

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_back_dep_1.
     Since we made it to add_or_update_back_dep_1, we must create
     (or update) a link.  */

      gcc_assert (current_sched_info->flags & DO_SPECULATION);
      ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));

  add_back_dep (insn, elem, dep_type, ds);
/* This function creates a link between INSN and ELEM under any
   conditions.  DS describes speculative status of the link.  */
add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if (current_sched_info->flags & USE_DEPS_LIST)
    LOG_LINKS (insn) = alloc_DEPS_LIST (elem, LOG_LINKS (insn), ds);
  else
    LOG_LINKS (insn) = alloc_INSN_LIST (elem, LOG_LINKS (insn));

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (LOG_LINKS (insn), dep_type);

#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, false);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
      if (!(current_sched_info->flags & USE_DEPS_LIST))
          if (dep_type == REG_DEP_TRUE)
            bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          else if (dep_type == REG_DEP_OUTPUT)
            bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          else if (dep_type == REG_DEP_ANTI)
            bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
            bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
            bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
            bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
                            INSN_LUID (elem));
          if (ds & SPECULATIVE)
              gcc_assert (current_sched_info->flags & DO_SPECULATION);
              bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
                              INSN_LUID (elem));
/* A convenience wrapper to operate on an entire list.  */
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
  for (; list; list = XEXP (list, 1))
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
/* Similar, but free *LISTP at the same time.  */
add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
                              enum reg_note dep_type)
  for (list = *listp, *listp = NULL; list; list = next)
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
/* Clear all dependencies for an insn.  */
delete_all_dependences (rtx insn)
  /* Clear caches, if they exist, as well as free the dependence.  */

#ifdef INSN_SCHEDULING
  if (true_dependency_cache != NULL)
      bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
      /* We don't have to clear forward_dependency_cache here,
         because it is formed later.  */
      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    /* In this case LOG_LINKS are formed from the DEPS_LISTs,
       not the INSN_LISTs.  */
    free_INSN_LIST_list (&LOG_LINKS (insn));
  else
    free_DEPS_LIST_list (&LOG_LINKS (insn));
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */
fixup_sched_groups (rtx insn)
  rtx link, prev_nonnote;

  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
          i = prev_nonnote_insn (i);

          if (XEXP (link, 0) == i)
        } while (SCHED_GROUP_P (i));
      if (! sched_insns_conditions_mutex_p (i, XEXP (link, 0)))
        add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
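/* For example, given "store [r1]" followed by "load [r1]", the load has a
   true dependence (kind 1) on the store; a second store to [r1] would have
   an output dependence (kind 2) on the first store and an anti dependence
   (kind 3) on the load.  */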
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */
add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
                         rtx insn, rtx mem)
  link = alloc_INSN_LIST (insn, *insn_list);

  if (current_sched_info->use_cselib)
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);

  deps->pending_lists_length++;
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
                     int for_write)
      add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);

  add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
                   enum rtx_code ref, rtx insn)
  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
      int i = hard_regno_nregs[regno][mode];
            SET_REGNO_REG_SET (reg_pending_sets, regno + i);
            SET_REGNO_REG_SET (reg_pending_uses, regno + i);
            SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
        SET_REGNO_REG_SET (reg_pending_sets, regno);
        SET_REGNO_REG_SET (reg_pending_uses, regno);
        SET_REGNO_REG_SET (reg_pending_clobbers, regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
         the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
          rtx t = get_reg_known_value (regno);
            sched_analyze_2 (deps, XEXP (t, 0), insn);

      /* Don't let it cross a call after scheduling if it doesn't
         already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
            add_dependence_list (insn, deps->last_function_call, 1,
                                 REG_DEP_ANTI);
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (GET_CODE (dest) == PARALLEL)
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT)
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || df_read_modify_subreg_p (dest))
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */
          sched_analyze_2 (deps, XEXP (dest, 0), insn);
      if (GET_CODE (dest) == ZERO_EXTRACT)
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
      dest = XEXP (dest, 0);

      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
          /* Avoid analyzing the same register twice.  */
          if (regno != FIRST_STACK_REG)
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
          sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
  else if (MEM_P (dest))
      /* Writing memory.  */

      if (current_sched_info->use_cselib)
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
              if (anti_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
              if (output_dependence (XEXP (pending_mem, 0), t)
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
      sched_analyze_2 (deps, XEXP (dest, 0), insn);

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
/* Analyze the uses of memory and registers in rtx X in INSN.  */
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */

      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
         same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;

        int regno = REGNO (x);
        enum machine_mode mode = GET_MODE (x);

        sched_analyze_reg (deps, regno, mode, USE, insn);

        /* Treat all reads of a stack register as modifying the TOS.  */
        if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
            /* Avoid analyzing the same register twice.  */
            if (regno != FIRST_STACK_REG)
              sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);

        /* Reading memory.  */
        rtx pending, pending_mem;

        if (current_sched_info->use_cselib)
            t = shallow_copy_rtx (t);
            cselib_lookup (XEXP (t, 0), Pmode, 1);
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
            if (read_dependence (XEXP (pending_mem, 0), t)
                && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 t, rtx_varies_p)
                && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                if (current_sched_info->flags & DO_SPECULATION)
                  maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
                                                  REG_DEP_TRUE,
                                                  BEGIN_DATA | DEP_TRUE,
                                                  XEXP (pending_mem, 0), t, 0);
                else
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);

      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, true, false);

    case UNSPEC_VOLATILE:
      /* Traditional and volatile asm instructions must be considered to use
         and clobber all hard registers, all pseudo-registers and all of
         memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

         Consider for instance a volatile asm that changes the fpu rounding
         mode.  An insn should not be moved across this even if it only uses
         pseudo-regs because it might give an incorrectly rounded result.  */
      if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
        reg_pending_barrier = TRUE_BARRIER;

      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
         We can not just fall through here since then we would be confused
         by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
         traditional asms unlike their normal usage.  */
      if (code == ASM_OPERANDS)
          for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
            sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
/* Analyze an INSN with pattern X to find all dependencies.  */
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
  RTX_CODE code = GET_CODE (x);
  reg_set_iterator rsi;

  if (code == COND_EXEC)
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
  if (code == SET || code == CLOBBER)
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
  else if (code == PARALLEL)
      for (i = XVECLEN (x, 0); i--;)
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (CALL_P (insn))
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = MOVE_BARRIER;

  if (JUMP_P (insn))
      next = next_nonnote_insn (insn);
      if (next && BARRIER_P (next))
        reg_pending_barrier = TRUE_BARRIER;
          rtx pending, pending_mem;
          regset_head tmp_uses, tmp_sets;
          INIT_REG_SET (&tmp_uses);
          INIT_REG_SET (&tmp_sets);

          (*current_sched_info->compute_jump_reg_dependencies)
            (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
          /* Make latency of jump equal to 0 by using anti-dependence.  */
          EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
          IOR_REG_SET (reg_pending_sets, &tmp_sets);

          CLEAR_REG_SET (&tmp_uses);
          CLEAR_REG_SET (&tmp_sets);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI);

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
      /* In the case of barrier the most added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_get_condition (insn))
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              add_dependence_list
                (insn, reg_last->sets, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list
                (insn, reg_last->clobbers, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->sets, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->clobbers, 0,
                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;

      for (i = 0; i < (unsigned)deps->max_reg; i++)
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      flush_pending_lists (deps, insn, true, true);
      CLEAR_REG_SET (&deps->reg_conditional_sets);
      reg_pending_barrier = NOT_A_BARRIER;
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (sched_get_condition (insn))
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                  add_dependence_list_and_free (insn, &reg_last->sets, 0,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets, 0,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
              CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */
  if (deps->libcall_block_tail_insn)
      SCHED_GROUP_P (insn) = 1;
      CANT_MOVE (insn) = 1;

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
  if (deps->in_post_call_group_p)
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
          if (deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          SCHED_GROUP_P (insn) = 1;
          CANT_MOVE (insn) = 1;
        end_call_group:
          deps->in_post_call_group_p = not_post_call;

  /* Fixup the dependencies in the sched group.  */
  if (SCHED_GROUP_P (insn))
    fixup_sched_groups (insn);
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */
sched_analyze (struct deps *deps, rtx head, rtx tail)
  if (current_sched_info->use_cselib)
    cselib_init (true);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
      insn = prev_nonnote_insn (head);
      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
  for (insn = head;; insn = NEXT_INSN (insn))
      rtx link, end_seq, r0, set;

      if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (JUMP_P (insn))
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
          sched_analyze_insn (deps, PATTERN (insn), insn);
      else if (CALL_P (insn))
          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          if (find_reg_note (insn, REG_SETJMP, NULL))
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = MOVE_BARRIER;
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                  /* A call may read and modify global register variables.  */
                  if (global_regs[i])
                      SET_REGNO_REG_SET (reg_pending_sets, i);
                      SET_REGNO_REG_SET (reg_pending_uses, i);
                  /* Other call-clobbered hard regs may be clobbered.
                     Since we only have a choice between 'might be clobbered'
                     and 'definitely not clobbered', we must include all
                     partly call-clobbered registers here.  */
                  else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                           || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                    SET_REGNO_REG_SET (reg_pending_clobbers, i);
                  /* We don't know what set of fixed registers might be used
                     by the function, but it is certain that the stack pointer
                     is among them; be conservative.  */
                  else if (fixed_regs[i])
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  /* The frame pointer is normally not used by the function
                     itself, but by the debugger.  */
                  /* ??? MIPS o32 is an exception.  It uses the frame pointer
                     in the macro expansion of jal but does not represent this
                     fact in the call_insn rtl.  */
                  else if (i == FRAME_POINTER_REGNUM
                           || (i == HARD_FRAME_POINTER_REGNUM
                               && (! reload_completed || frame_pointer_needed)))
                    SET_REGNO_REG_SET (reg_pending_uses, i);

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn);

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;

      /* EH_REGION insn notes can not appear until well after we complete
         scheduling.  */
        gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
                    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);

      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         block.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         the libcall block.  */
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
          && REG_P (XEXP (PATTERN (insn), 0))
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

          if (current_sched_info->use_cselib)
            cselib_finish ();
/* The following function adds a forward dependence (FROM, TO) with
   given DEP_TYPE.  The forward dependence must not already exist.  */
add_forw_dep (rtx to, rtx link)
  from = XEXP (link, 0);

#ifdef ENABLE_CHECKING
  /* If add_dependence is working properly there should never
     be notes, deleted insns or duplicates in the backward
     links.  Thus we need not check for them here.

     However, if we have enabled checking we might as well go
     ahead and verify that add_dependence worked properly.  */
  gcc_assert (INSN_P (from));
  gcc_assert (!INSN_DELETED_P (from));
  if (true_dependency_cache)
      gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
                                 INSN_LUID (to)));
      bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
                      INSN_LUID (to));
    gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
  else
    new_link = alloc_DEPS_LIST (to, INSN_DEPEND (from), DEP_STATUS (link));

  PUT_REG_NOTE_KIND (new_link, REG_NOTE_KIND (link));

  INSN_DEPEND (from) = new_link;
  INSN_DEP_COUNT (to) += 1;
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */
compute_forward_dependences (rtx head, rtx tail)
  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
      if (! INSN_P (insn))
        continue;

      if (current_sched_info->flags & DO_SPECULATION)
          rtx new = 0, link, next;

          for (link = LOG_LINKS (insn); link; link = next)
              next = XEXP (link, 1);
              adjust_add_sorted_back_dep (insn, link, &new);
          LOG_LINKS (insn) = new;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        add_forw_dep (insn, link);
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */
init_deps (struct deps *deps)
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->libcall_block_tail_insn = 0;
/* Free insn lists found in DEPS.  */
free_deps (struct deps *deps)
  reg_set_iterator rsi;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
      struct deps_reg *reg_last = &deps->reg_last[i];
        free_INSN_LIST_list (&reg_last->uses);
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  free (deps->reg_last);
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */
init_dependency_caches (int luid)
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
      extend_dependency_caches (luid, true);
/* Create or extend (depending on CREATE_P) dependency caches to
   accommodate N more insns.  */
extend_dependency_caches (int n, bool create_p)
  if (create_p || true_dependency_cache)
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = XRESIZEVEC (bitmap_head,
                                             forward_dependency_cache, luid);
#endif
      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
          bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
/* Free the caches allocated in init_dependency_caches.  */
free_dependency_caches (void)
  if (true_dependency_cache)
      for (i = 0; i < cache_size; i++)
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
#ifdef ENABLE_CHECKING
          bitmap_clear (&forward_dependency_cache[i]);
#endif
          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_clear (&spec_dependency_cache[i]);
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
      if (current_sched_info->flags & DO_SPECULATION)
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
/* Initialize some global variables needed by the dependency analysis
   code.  */
init_deps_global (void)
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

/* Free everything used by the dependency analysis code.  */
finish_deps_global (void)
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
/* Insert LINK into the dependence chain pointed to by LINKP and
   maintain the sort order.  */
adjust_add_sorted_back_dep (rtx insn, rtx link, rtx *linkp)
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  /* If the insn cannot move speculatively, but the link is speculative,
     make it hard dependence.  */
  if (HAS_INTERNAL_DEP (insn)
      && (DEP_STATUS (link) & SPECULATIVE))
      DEP_STATUS (link) &= ~SPECULATIVE;

      if (true_dependency_cache)
        bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                          INSN_LUID (XEXP (link, 0)));

  /* Non-speculative links go at the head of LOG_LINKS, followed by
     speculative links.  */
  if (DEP_STATUS (link) & SPECULATIVE)
    while (*linkp && !(DEP_STATUS (*linkp) & SPECULATIVE))
      linkp = &XEXP (*linkp, 1);

  XEXP (link, 1) = *linkp;
  *linkp = link;
/* Move the dependence pointed to by LINKP to the back dependencies
   of INSN, and also add this dependence to the forward ones.  All LOG_LINKS,
   except one pointed to by LINKP, must be sorted.  */
adjust_back_add_forw_dep (rtx insn, rtx *linkp)
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  link = *linkp;
  *linkp = XEXP (*linkp, 1);

  adjust_add_sorted_back_dep (insn, link, &LOG_LINKS (insn));
  add_forw_dep (insn, link);
/* Remove forward dependence ELEM from the DEPS_LIST of INSN.  */
delete_forw_dep (rtx insn, rtx elem)
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

#ifdef ENABLE_CHECKING
  if (true_dependency_cache)
    bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (elem)],
                      INSN_LUID (insn));
#endif

  remove_free_DEPS_LIST_elem (insn, &INSN_DEPEND (elem));
  INSN_DEP_COUNT (insn)--;
/* Estimate the weakness of dependence between MEM1 and MEM2.  */
estimate_dep_weak (rtx mem1, rtx mem2)
  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
          && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
           || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative,
       than average.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
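/* As the cases above suggest, MIN_DEP_WEAK marks references that certainly
   conflict (do not speculate), NO_DEP_WEAK marks references that are very
   unlikely to conflict, and UNCERTAIN_DEP_WEAK sits in between when nothing
   better is known.  */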
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_ANTI)
    ds = DEP_ANTI;

  maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
  return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
/* Add or update both backward and forward dependencies between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.  */
add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
                             ds_t ds)
  enum DEPS_ADJUST_RESULT res;

  res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);

  if (res == DEP_CHANGED || res == DEP_CREATED)
      if (res == DEP_CHANGED)
        delete_forw_dep (insn, elem);
      else if (res == DEP_CREATED)
        linkp = &LOG_LINKS (insn);

      adjust_back_add_forw_dep (insn, linkp);
/* Add both backward and forward dependencies between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.  */
add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
  add_back_dep (insn, elem, dep_type, ds);
  adjust_back_add_forw_dep (insn, &LOG_LINKS (insn));
/* Remove both backward and forward dependencies between INSN and ELEM.  */
delete_back_forw_dep (rtx insn, rtx elem)
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  if (true_dependency_cache != NULL)
      bitmap_clear_bit (&true_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem));
      bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem));
      bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem));
      bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem));

  remove_free_DEPS_LIST_elem (elem, &LOG_LINKS (insn));
  delete_forw_dep (insn, elem);
/* Return weakness of speculative type TYPE in the dep_status DS.  */
get_dep_weak (ds_t ds, ds_t type)
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();

  gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
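/* In other words, each speculative type occupies its own bit-field inside
   DS; get_dep_weak shifts that field down to extract the weakness, and
   set_dep_weak below shifts a new weakness back up into the same field.  */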
/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
/* Return the join of two dep_statuses DS1 and DS2.  */
ds_merge (ds_t ds1, ds_t ds2)
  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
          dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
          if (dw < MIN_DEP_WEAK)
            dw = MIN_DEP_WEAK;

          ds = set_dep_weak (ds, t, (dw_t) dw);

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
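/* Note that when both statuses carry the same speculative type, the merged
   weakness above is based on the product of the two weaknesses (clamped at
   MIN_DEP_WEAK), i.e. the weaknesses behave roughly like probabilities that
   the dependence is absent.  */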
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
                && (ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP can not appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!(current_sched_info->flags & DO_SPECULATION))
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
      ds_t type = FIRST_SPEC_TYPE;

      /* Check that dependence weakness is in proper range.  */
          get_dep_weak (ds, type);

          if (type == LAST_SPEC_TYPE)
          type <<= SPEC_TYPE_SHIFT;

      if (ds & BEGIN_SPEC)
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);

          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);