/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
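/* Illustrative note (added commentary): further down in this file a volatile
   asm or a TRAP_IF handled by sched_analyze_2 raises TRUE_BARRIER, while a
   REG_SETJMP call or an insn that can_throw_internal raises MOVE_BARRIER;
   sched_analyze_insn then expands whichever barrier is pending into
   per-register dependencies on every register in reg_last_in_use.  */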
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

static enum reg_pending_barrier_mode reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmaps for the true-dependency cache are
   allocated, the other two caches are allocated as well.  */
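/* Illustrative example (added commentary): with the estimate above, the
   caches are only worth having when a block averages more than
   100 * 5 = 500 insns; init_dependency_caches below tests exactly
   luid / n_basic_blocks > 100 * 5.  A duplicate test then looks like
   bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)),
   which is how add_dependence avoids walking LOG_LINKS in the common case.  */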
static bitmap_head *true_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *output_dependency_cache;
static int cache_size;
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (rtx mem)
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (rtx insn, rtx list)
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
sched_get_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));
#if 0
  /* The previous code here was completely invalid and could never extract
     the condition from a jump.  This code does the correct thing, but that
     triggers latent bugs later in the scheduler on ports with conditional
     execution.  So this is disabled for now.  */
  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;
      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
                             XEXP (cond, 1));
    }
#endif

  return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
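/* Worked example (added commentary): for cond1 = (eq (reg 100) (const_int 0))
   and cond2 = (ne (reg 100) (const_int 0)), reversed_comparison_code (cond2,
   NULL) yields EQ and both operand pairs match, so conditions_mutex_p returns
   nonzero; add_dependence below uses this, together with modified_in_p
   checks, to drop dependencies between two COND_EXEC insns whose conditions
   can never hold at the same time.  */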
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  The function returns
   nonzero if a new entry has been added to insn's LOG_LINKS.  */

int
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  rtx link;
  int present_p;
  rtx cond1, cond2;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return 0;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (NOTE_P (elem))
    return 0;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be changed.  */
  if (!CALL_P (insn) && !CALL_P (elem))
    {
      cond1 = sched_get_condition (insn);
      cond2 = sched_get_condition (elem);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, elem)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn))
        return 0;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (CALL_P (insn)
      && (INSN_BB (elem) != INSN_BB (insn)))
    return 0;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      gcc_assert (anti_dependency_cache);
      gcc_assert (output_dependency_cache);
      if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
                        INSN_LUID (elem)))
        /* Do nothing (present_set_type is already 0).  */
        ;
      else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
                             INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
                             INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return 0;
    }
#endif
  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              enum reg_note kind = REG_NOTE_KIND (link);
              switch (kind)
                {
                case REG_DEP_ANTI:
                  bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
                                    INSN_LUID (elem));
                  break;
                case REG_DEP_OUTPUT:
                  gcc_assert (output_dependency_cache);
                  bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
                                    INSN_LUID (elem));
                  break;
                default:
                  gcc_unreachable ();
                }
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
                                INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
                                INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
                                INSN_LUID (elem));
            }
#endif
          return 0;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif

  return 1;
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}
/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
{
  rtx list, next;

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Clear all dependencies for an insn.  */

static void
delete_all_dependences (rtx insn)
{
  /* Clear caches, if they exist, as well as free the dependence.  */

#ifdef INSN_SCHEDULING
  if (true_dependency_cache != NULL)
    {
      bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
    }
#endif

  free_INSN_LIST_list (&LOG_LINKS (insn));
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  rtx link;

  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      rtx i = insn;
      do
        {
          i = prev_nonnote_insn (i);

          if (XEXP (link, 0) == i)
            goto next_link;
        } while (SCHED_GROUP_P (i));
      add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
    next_link:;
    }

  delete_all_dependences (insn);

  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote_insn (insn)))
    add_dependence (insn, prev_nonnote_insn (insn), REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
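/* Illustrative example (added commentary): for the sequence
     (1) r1 = [mem]     read
     (2) [mem] = r2     write
     (3) r3 = [mem]     read
     (4) [mem] = r4     write
   insn (2) gets an anti dependence on (1), insn (3) a true dependence on (2),
   and insn (4) an output dependence on (2) plus an anti dependence on (3).
   sched_analyze_1 and sched_analyze_2 below build exactly these links from
   the pending_read_* and pending_write_* lists.  */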
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */
static void
add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
                         rtx insn, rtx mem)
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */
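/* Usage note (added commentary): barriers and overlong jump chains call this
   as flush_pending_lists (deps, insn, true, true); sched_analyze_1 uses
   flush_pending_lists (deps, insn, false, true) when a write would push
   pending_lists_length past MAX_PENDING_LIST_LENGTH; and sched_analyze_2 uses
   flush_pending_lists (deps, insn, true, false) to force pending stores out
   before a potentially trapping read.  */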
static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
                     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
479 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
480 rtx, X, creating all dependencies generated by the write to the
481 destination of X, and reads of everything mentioned. */
484 sched_analyze_1 (struct deps
*deps
, rtx x
, rtx insn
)
487 rtx dest
= XEXP (x
, 0);
488 enum rtx_code code
= GET_CODE (x
);
493 if (GET_CODE (dest
) == PARALLEL
)
497 for (i
= XVECLEN (dest
, 0) - 1; i
>= 0; i
--)
498 if (XEXP (XVECEXP (dest
, 0, i
), 0) != 0)
499 sched_analyze_1 (deps
,
500 gen_rtx_CLOBBER (VOIDmode
,
501 XEXP (XVECEXP (dest
, 0, i
), 0)),
504 if (GET_CODE (x
) == SET
)
505 sched_analyze_2 (deps
, SET_SRC (x
), insn
);
509 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SUBREG
510 || GET_CODE (dest
) == ZERO_EXTRACT
)
512 if (GET_CODE (dest
) == STRICT_LOW_PART
513 || GET_CODE (dest
) == ZERO_EXTRACT
514 || read_modify_subreg_p (dest
))
516 /* These both read and modify the result. We must handle
517 them as writes to get proper dependencies for following
518 instructions. We must handle them as reads to get proper
519 dependencies from this to previous instructions.
520 Thus we need to call sched_analyze_2. */
522 sched_analyze_2 (deps
, XEXP (dest
, 0), insn
);
524 if (GET_CODE (dest
) == ZERO_EXTRACT
)
526 /* The second and third arguments are values read by this insn. */
527 sched_analyze_2 (deps
, XEXP (dest
, 1), insn
);
528 sched_analyze_2 (deps
, XEXP (dest
, 2), insn
);
530 dest
= XEXP (dest
, 0);
535 regno
= REGNO (dest
);
538 /* Treat all writes to a stack register as modifying the TOS. */
539 if (regno
>= FIRST_STACK_REG
&& regno
<= LAST_STACK_REG
)
541 SET_REGNO_REG_SET (reg_pending_uses
, FIRST_STACK_REG
);
542 regno
= FIRST_STACK_REG
;
546 /* A hard reg in a wide mode may really be multiple registers.
547 If so, mark all of them just like the first. */
548 if (regno
< FIRST_PSEUDO_REGISTER
)
550 int i
= hard_regno_nregs
[regno
][GET_MODE (dest
)];
554 SET_REGNO_REG_SET (reg_pending_sets
, regno
+ i
);
559 SET_REGNO_REG_SET (reg_pending_clobbers
, regno
+ i
);
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose.  */
565 else if (regno
>= deps
->max_reg
)
567 gcc_assert (GET_CODE (PATTERN (insn
)) == USE
568 || GET_CODE (PATTERN (insn
)) == CLOBBER
);
573 SET_REGNO_REG_SET (reg_pending_sets
, regno
);
575 SET_REGNO_REG_SET (reg_pending_clobbers
, regno
);
577 /* Pseudos that are REG_EQUIV to something may be replaced
578 by that during reloading. We need only add dependencies for
579 the address in the REG_EQUIV note. */
580 if (!reload_completed
&& get_reg_known_equiv_p (regno
))
582 rtx t
= get_reg_known_value (regno
);
584 sched_analyze_2 (deps
, XEXP (t
, 0), insn
);
587 /* Don't let it cross a call after scheduling if it doesn't
588 already cross one. */
589 if (REG_N_CALLS_CROSSED (regno
) == 0)
590 add_dependence_list (insn
, deps
->last_function_call
, REG_DEP_ANTI
);
593 else if (MEM_P (dest
))
595 /* Writing memory. */
598 if (current_sched_info
->use_cselib
)
600 t
= shallow_copy_rtx (dest
);
601 cselib_lookup (XEXP (t
, 0), Pmode
, 1);
602 XEXP (t
, 0) = cselib_subst_to_values (XEXP (t
, 0));
606 if (deps
->pending_lists_length
> MAX_PENDING_LIST_LENGTH
)
608 /* Flush all pending reads and writes to prevent the pending lists
609 from getting any larger. Insn scheduling runs too slowly when
610 these lists get long. When compiling GCC with itself,
611 this flush occurs 8 times for sparc, and 10 times for m88k using
612 the default value of 32. */
613 flush_pending_lists (deps
, insn
, false, true);
617 rtx pending
, pending_mem
;
619 pending
= deps
->pending_read_insns
;
620 pending_mem
= deps
->pending_read_mems
;
623 if (anti_dependence (XEXP (pending_mem
, 0), t
))
624 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
626 pending
= XEXP (pending
, 1);
627 pending_mem
= XEXP (pending_mem
, 1);
630 pending
= deps
->pending_write_insns
;
631 pending_mem
= deps
->pending_write_mems
;
634 if (output_dependence (XEXP (pending_mem
, 0), t
))
635 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
637 pending
= XEXP (pending
, 1);
638 pending_mem
= XEXP (pending_mem
, 1);
641 add_dependence_list (insn
, deps
->last_pending_memory_flush
,
644 add_insn_mem_dependence (deps
, &deps
->pending_write_insns
,
645 &deps
->pending_write_mems
, insn
, dest
);
647 sched_analyze_2 (deps
, XEXP (dest
, 0), insn
);
651 if (GET_CODE (x
) == SET
)
652 sched_analyze_2 (deps
, SET_SRC (x
), insn
);
655 /* Analyze the uses of memory and registers in rtx X in INSN. */
658 sched_analyze_2 (struct deps
*deps
, rtx x
, rtx insn
)
678 /* Ignore constants. Note that we must handle CONST_DOUBLE here
679 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
680 this does not mean that this insn is using cc0. */
685 /* User of CC0 depends on immediately preceding insn. */
686 SCHED_GROUP_P (insn
) = 1;
687 /* Don't move CC0 setter to another block (it can set up the
688 same flag for previous CC0 users which is safe). */
689 CANT_MOVE (prev_nonnote_insn (insn
)) = 1;
695 int regno
= REGNO (x
);
698 /* Treat all reads of a stack register as modifying the TOS. */
699 if (regno
>= FIRST_STACK_REG
&& regno
<= LAST_STACK_REG
)
701 SET_REGNO_REG_SET (reg_pending_sets
, FIRST_STACK_REG
);
702 regno
= FIRST_STACK_REG
;
706 if (regno
< FIRST_PSEUDO_REGISTER
)
708 int i
= hard_regno_nregs
[regno
][GET_MODE (x
)];
710 SET_REGNO_REG_SET (reg_pending_uses
, regno
+ i
);
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose.  */
715 else if (regno
>= deps
->max_reg
)
717 gcc_assert (GET_CODE (PATTERN (insn
)) == USE
718 || GET_CODE (PATTERN (insn
)) == CLOBBER
);
722 SET_REGNO_REG_SET (reg_pending_uses
, regno
);
724 /* Pseudos that are REG_EQUIV to something may be replaced
725 by that during reloading. We need only add dependencies for
726 the address in the REG_EQUIV note. */
727 if (!reload_completed
&& get_reg_known_equiv_p (regno
))
729 rtx t
= get_reg_known_value (regno
);
731 sched_analyze_2 (deps
, XEXP (t
, 0), insn
);
734 /* If the register does not already cross any calls, then add this
735 insn to the sched_before_next_call list so that it will still
736 not cross calls after scheduling. */
737 if (REG_N_CALLS_CROSSED (regno
) == 0)
738 deps
->sched_before_next_call
739 = alloc_INSN_LIST (insn
, deps
->sched_before_next_call
);
746 /* Reading memory. */
748 rtx pending
, pending_mem
;
751 if (current_sched_info
->use_cselib
)
753 t
= shallow_copy_rtx (t
);
754 cselib_lookup (XEXP (t
, 0), Pmode
, 1);
755 XEXP (t
, 0) = cselib_subst_to_values (XEXP (t
, 0));
758 pending
= deps
->pending_read_insns
;
759 pending_mem
= deps
->pending_read_mems
;
762 if (read_dependence (XEXP (pending_mem
, 0), t
))
763 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_ANTI
);
765 pending
= XEXP (pending
, 1);
766 pending_mem
= XEXP (pending_mem
, 1);
769 pending
= deps
->pending_write_insns
;
770 pending_mem
= deps
->pending_write_mems
;
773 if (true_dependence (XEXP (pending_mem
, 0), VOIDmode
,
775 add_dependence (insn
, XEXP (pending
, 0), 0);
777 pending
= XEXP (pending
, 1);
778 pending_mem
= XEXP (pending_mem
, 1);
781 for (u
= deps
->last_pending_memory_flush
; u
; u
= XEXP (u
, 1))
782 if (!JUMP_P (XEXP (u
, 0))
783 || deps_may_trap_p (x
))
784 add_dependence (insn
, XEXP (u
, 0), REG_DEP_ANTI
);
786 /* Always add these dependencies to pending_reads, since
787 this insn may be followed by a write. */
788 add_insn_mem_dependence (deps
, &deps
->pending_read_insns
,
789 &deps
->pending_read_mems
, insn
, x
);
791 /* Take advantage of tail recursion here. */
792 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
796 /* Force pending stores to memory in case a trap handler needs them. */
798 flush_pending_lists (deps
, insn
, true, false);
803 case UNSPEC_VOLATILE
:
805 /* Traditional and volatile asm instructions must be considered to use
806 and clobber all hard registers, all pseudo-registers and all of
807 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
809 Consider for instance a volatile asm that changes the fpu rounding
810 mode. An insn should not be moved across this even if it only uses
811 pseudo-regs because it might give an incorrectly rounded result. */
812 if (code
!= ASM_OPERANDS
|| MEM_VOLATILE_P (x
))
813 reg_pending_barrier
= TRUE_BARRIER
;
      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
         We can not just fall through here since then we would be confused
         by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
         a traditional asm, unlike its normal usage.  */
820 if (code
== ASM_OPERANDS
)
822 for (j
= 0; j
< ASM_OPERANDS_INPUT_LENGTH (x
); j
++)
823 sched_analyze_2 (deps
, ASM_OPERANDS_INPUT (x
, j
), insn
);
833 /* These both read and modify the result. We must handle them as writes
834 to get proper dependencies for following instructions. We must handle
835 them as reads to get proper dependencies from this to previous
836 instructions. Thus we need to pass them to both sched_analyze_1
837 and sched_analyze_2. We must call sched_analyze_2 first in order
838 to get the proper antecedent for the read. */
839 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
840 sched_analyze_1 (deps
, x
, insn
);
845 /* op0 = op0 + op1 */
846 sched_analyze_2 (deps
, XEXP (x
, 0), insn
);
847 sched_analyze_2 (deps
, XEXP (x
, 1), insn
);
848 sched_analyze_1 (deps
, x
, insn
);
855 /* Other cases: walk the insn. */
856 fmt
= GET_RTX_FORMAT (code
);
857 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
860 sched_analyze_2 (deps
, XEXP (x
, i
), insn
);
861 else if (fmt
[i
] == 'E')
862 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
863 sched_analyze_2 (deps
, XVECEXP (x
, i
, j
), insn
);
867 /* Analyze an INSN with pattern X to find all dependencies. */
870 sched_analyze_insn (struct deps
*deps
, rtx x
, rtx insn
, rtx loop_notes
)
872 RTX_CODE code
= GET_CODE (x
);
875 reg_set_iterator rsi
;
877 if (code
== COND_EXEC
)
879 sched_analyze_2 (deps
, COND_EXEC_TEST (x
), insn
);
881 /* ??? Should be recording conditions so we reduce the number of
882 false dependencies. */
883 x
= COND_EXEC_CODE (x
);
886 if (code
== SET
|| code
== CLOBBER
)
888 sched_analyze_1 (deps
, x
, insn
);
890 /* Bare clobber insns are used for letting life analysis, reg-stack
891 and others know that a value is dead. Depend on the last call
892 instruction so that reg-stack won't get confused. */
894 add_dependence_list (insn
, deps
->last_function_call
, REG_DEP_OUTPUT
);
896 else if (code
== PARALLEL
)
898 for (i
= XVECLEN (x
, 0); i
--;)
900 rtx sub
= XVECEXP (x
, 0, i
);
901 code
= GET_CODE (sub
);
903 if (code
== COND_EXEC
)
905 sched_analyze_2 (deps
, COND_EXEC_TEST (sub
), insn
);
906 sub
= COND_EXEC_CODE (sub
);
907 code
= GET_CODE (sub
);
909 if (code
== SET
|| code
== CLOBBER
)
910 sched_analyze_1 (deps
, sub
, insn
);
912 sched_analyze_2 (deps
, sub
, insn
);
916 sched_analyze_2 (deps
, x
, insn
);
918 /* Mark registers CLOBBERED or used by called function. */
921 for (link
= CALL_INSN_FUNCTION_USAGE (insn
); link
; link
= XEXP (link
, 1))
923 if (GET_CODE (XEXP (link
, 0)) == CLOBBER
)
924 sched_analyze_1 (deps
, XEXP (link
, 0), insn
);
926 sched_analyze_2 (deps
, XEXP (link
, 0), insn
);
928 if (find_reg_note (insn
, REG_SETJMP
, NULL
))
929 reg_pending_barrier
= MOVE_BARRIER
;
935 next
= next_nonnote_insn (insn
);
936 if (next
&& BARRIER_P (next
))
937 reg_pending_barrier
= TRUE_BARRIER
;
940 rtx pending
, pending_mem
;
941 regset_head tmp_uses
, tmp_sets
;
942 INIT_REG_SET (&tmp_uses
);
943 INIT_REG_SET (&tmp_sets
);
945 (*current_sched_info
->compute_jump_reg_dependencies
)
946 (insn
, &deps
->reg_conditional_sets
, &tmp_uses
, &tmp_sets
);
947 /* Make latency of jump equal to 0 by using anti-dependence. */
948 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses
, 0, i
, rsi
)
950 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
951 add_dependence_list (insn
, reg_last
->sets
, REG_DEP_ANTI
);
952 add_dependence_list (insn
, reg_last
->clobbers
, REG_DEP_ANTI
);
953 reg_last
->uses_length
++;
954 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
956 IOR_REG_SET (reg_pending_sets
, &tmp_sets
);
958 CLEAR_REG_SET (&tmp_uses
);
959 CLEAR_REG_SET (&tmp_sets
);
961 /* All memory writes and volatile reads must happen before the
962 jump. Non-volatile reads must happen before the jump iff
963 the result is needed by the above register used mask. */
965 pending
= deps
->pending_write_insns
;
966 pending_mem
= deps
->pending_write_mems
;
969 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
970 pending
= XEXP (pending
, 1);
971 pending_mem
= XEXP (pending_mem
, 1);
974 pending
= deps
->pending_read_insns
;
975 pending_mem
= deps
->pending_read_mems
;
978 if (MEM_VOLATILE_P (XEXP (pending_mem
, 0)))
979 add_dependence (insn
, XEXP (pending
, 0), REG_DEP_OUTPUT
);
980 pending
= XEXP (pending
, 1);
981 pending_mem
= XEXP (pending_mem
, 1);
984 add_dependence_list (insn
, deps
->last_pending_memory_flush
,
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
997 /* Update loop_notes with any notes from this insn. */
999 while (XEXP (link
, 1))
1001 gcc_assert (INTVAL (XEXP (link
, 0)) == NOTE_INSN_LOOP_BEG
1002 || INTVAL (XEXP (link
, 0)) == NOTE_INSN_LOOP_END
);
1004 reg_pending_barrier
= MOVE_BARRIER
;
1005 link
= XEXP (link
, 1);
1007 XEXP (link
, 1) = REG_NOTES (insn
);
1008 REG_NOTES (insn
) = loop_notes
;
1011 /* If this instruction can throw an exception, then moving it changes
1012 where block boundaries fall. This is mighty confusing elsewhere.
1013 Therefore, prevent such an instruction from being moved. */
1014 if (can_throw_internal (insn
))
1015 reg_pending_barrier
= MOVE_BARRIER
;
1017 /* Add dependencies if a scheduling barrier was found. */
1018 if (reg_pending_barrier
)
      /* In the case of a barrier, most of the added dependencies are not
         real, so we use anti-dependences here.  */
1022 if (GET_CODE (PATTERN (insn
)) == COND_EXEC
)
1024 EXECUTE_IF_SET_IN_REG_SET (&deps
->reg_last_in_use
, 0, i
, rsi
)
1026 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1027 add_dependence_list (insn
, reg_last
->uses
, REG_DEP_ANTI
);
1029 (insn
, reg_last
->sets
,
1030 reg_pending_barrier
== TRUE_BARRIER
? 0 : REG_DEP_ANTI
);
1032 (insn
, reg_last
->clobbers
,
1033 reg_pending_barrier
== TRUE_BARRIER
? 0 : REG_DEP_ANTI
);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->sets,
                 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
              add_dependence_list_and_free
                (insn, &reg_last->clobbers,
                 reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            }
        }
1054 for (i
= 0; i
< (unsigned)deps
->max_reg
; i
++)
1056 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1057 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1058 SET_REGNO_REG_SET (&deps
->reg_last_in_use
, i
);
1061 flush_pending_lists (deps
, insn
, true, true);
1062 CLEAR_REG_SET (&deps
->reg_conditional_sets
);
1063 reg_pending_barrier
= NOT_A_BARRIER
;
      /* If the current insn is conditional, we can't free any
         of the lists.  */
1069 if (GET_CODE (PATTERN (insn
)) == COND_EXEC
)
1071 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses
, 0, i
, rsi
)
1073 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1074 add_dependence_list (insn
, reg_last
->sets
, 0);
1075 add_dependence_list (insn
, reg_last
->clobbers
, 0);
1076 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
1077 reg_last
->uses_length
++;
1079 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers
, 0, i
, rsi
)
1081 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1082 add_dependence_list (insn
, reg_last
->sets
, REG_DEP_OUTPUT
);
1083 add_dependence_list (insn
, reg_last
->uses
, REG_DEP_ANTI
);
1084 reg_last
->clobbers
= alloc_INSN_LIST (insn
, reg_last
->clobbers
);
1085 reg_last
->clobbers_length
++;
1087 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets
, 0, i
, rsi
)
1089 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1090 add_dependence_list (insn
, reg_last
->sets
, REG_DEP_OUTPUT
);
1091 add_dependence_list (insn
, reg_last
->clobbers
, REG_DEP_OUTPUT
);
1092 add_dependence_list (insn
, reg_last
->uses
, REG_DEP_ANTI
);
1093 reg_last
->sets
= alloc_INSN_LIST (insn
, reg_last
->sets
);
1094 SET_REGNO_REG_SET (&deps
->reg_conditional_sets
, i
);
1099 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses
, 0, i
, rsi
)
1101 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1102 add_dependence_list (insn
, reg_last
->sets
, 0);
1103 add_dependence_list (insn
, reg_last
->clobbers
, 0);
1104 reg_last
->uses_length
++;
1105 reg_last
->uses
= alloc_INSN_LIST (insn
, reg_last
->uses
);
1107 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers
, 0, i
, rsi
)
1109 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
1110 if (reg_last
->uses_length
> MAX_PENDING_LIST_LENGTH
1111 || reg_last
->clobbers_length
> MAX_PENDING_LIST_LENGTH
)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1120 reg_last
->clobbers_length
= 0;
1121 reg_last
->uses_length
= 0;
1125 add_dependence_list (insn
, reg_last
->sets
, REG_DEP_OUTPUT
);
1126 add_dependence_list (insn
, reg_last
->uses
, REG_DEP_ANTI
);
1128 reg_last
->clobbers_length
++;
1129 reg_last
->clobbers
= alloc_INSN_LIST (insn
, reg_last
->clobbers
);
1131 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets
, 0, i
, rsi
)
1133 struct deps_reg
*reg_last
= &deps
->reg_last
[i
];
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1141 reg_last
->uses_length
= 0;
1142 reg_last
->clobbers_length
= 0;
1143 CLEAR_REGNO_REG_SET (&deps
->reg_conditional_sets
, i
);
1147 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_uses
);
1148 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_clobbers
);
1149 IOR_REG_SET (&deps
->reg_last_in_use
, reg_pending_sets
);
1151 CLEAR_REG_SET (reg_pending_uses
);
1152 CLEAR_REG_SET (reg_pending_clobbers
);
1153 CLEAR_REG_SET (reg_pending_sets
);
1155 /* If we are currently in a libcall scheduling group, then mark the
1156 current insn as being in a scheduling group and that it can not
1157 be moved into a different basic block. */
1159 if (deps
->libcall_block_tail_insn
)
1161 SCHED_GROUP_P (insn
) = 1;
1162 CANT_MOVE (insn
) = 1;
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
1174 if (deps
->in_post_call_group_p
)
1176 rtx tmp
, set
= single_set (insn
);
1177 int src_regno
, dest_regno
;
1180 goto end_call_group
;
1182 tmp
= SET_DEST (set
);
1183 if (GET_CODE (tmp
) == SUBREG
)
1184 tmp
= SUBREG_REG (tmp
);
1186 dest_regno
= REGNO (tmp
);
1188 goto end_call_group
;
1190 tmp
= SET_SRC (set
);
1191 if (GET_CODE (tmp
) == SUBREG
)
1192 tmp
= SUBREG_REG (tmp
);
1193 if ((GET_CODE (tmp
) == PLUS
1194 || GET_CODE (tmp
) == MINUS
)
1195 && REG_P (XEXP (tmp
, 0))
1196 && REGNO (XEXP (tmp
, 0)) == STACK_POINTER_REGNUM
1197 && dest_regno
== STACK_POINTER_REGNUM
)
1198 src_regno
= STACK_POINTER_REGNUM
;
1199 else if (REG_P (tmp
))
1200 src_regno
= REGNO (tmp
);
1202 goto end_call_group
;
1204 if (src_regno
< FIRST_PSEUDO_REGISTER
1205 || dest_regno
< FIRST_PSEUDO_REGISTER
)
1207 if (deps
->in_post_call_group_p
== post_call_initial
)
1208 deps
->in_post_call_group_p
= post_call
;
1210 SCHED_GROUP_P (insn
) = 1;
1211 CANT_MOVE (insn
) = 1;
1216 deps
->in_post_call_group_p
= not_post_call
;
1220 /* Fixup the dependencies in the sched group. */
1221 if (SCHED_GROUP_P (insn
))
1222 fixup_sched_groups (insn
);
1225 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1226 for every dependency. */
1229 sched_analyze (struct deps
*deps
, rtx head
, rtx tail
)
1234 if (current_sched_info
->use_cselib
)
1237 /* Before reload, if the previous block ended in a call, show that
1238 we are inside a post-call group, so as to keep the lifetimes of
1239 hard registers correct. */
1240 if (! reload_completed
&& !LABEL_P (head
))
1242 insn
= prev_nonnote_insn (head
);
1243 if (insn
&& CALL_P (insn
))
1244 deps
->in_post_call_group_p
= post_call_initial
;
1246 for (insn
= head
;; insn
= NEXT_INSN (insn
))
1248 rtx link
, end_seq
, r0
, set
;
1250 if (NONJUMP_INSN_P (insn
) || JUMP_P (insn
))
1252 /* Clear out the stale LOG_LINKS from flow. */
1253 free_INSN_LIST_list (&LOG_LINKS (insn
));
          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (JUMP_P (insn))
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
1266 sched_analyze_insn (deps
, PATTERN (insn
), insn
, loop_notes
);
1269 else if (CALL_P (insn
))
1273 CANT_MOVE (insn
) = 1;
1275 /* Clear out the stale LOG_LINKS from flow. */
1276 free_INSN_LIST_list (&LOG_LINKS (insn
));
1278 if (find_reg_note (insn
, REG_SETJMP
, NULL
))
1280 /* This is setjmp. Assume that all registers, not just
1281 hard registers, may be clobbered by this call. */
1282 reg_pending_barrier
= MOVE_BARRIER
;
1286 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1287 /* A call may read and modify global register variables. */
1290 SET_REGNO_REG_SET (reg_pending_sets
, i
);
1291 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1293 /* Other call-clobbered hard regs may be clobbered.
1294 Since we only have a choice between 'might be clobbered'
1295 and 'definitely not clobbered', we must include all
1296 partly call-clobbered registers here. */
1297 else if (HARD_REGNO_CALL_PART_CLOBBERED (i
, reg_raw_mode
[i
])
1298 || TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
1299 SET_REGNO_REG_SET (reg_pending_clobbers
, i
);
            /* We don't know what set of fixed registers might be used
               by the function, but it is certain that the stack pointer
               is among them, so be conservative.  */
1303 else if (fixed_regs
[i
])
1304 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1305 /* The frame pointer is normally not used by the function
1306 itself, but by the debugger. */
1307 /* ??? MIPS o32 is an exception. It uses the frame pointer
1308 in the macro expansion of jal but does not represent this
1309 fact in the call_insn rtl. */
1310 else if (i
== FRAME_POINTER_REGNUM
1311 || (i
== HARD_FRAME_POINTER_REGNUM
1312 && (! reload_completed
|| frame_pointer_needed
)))
1313 SET_REGNO_REG_SET (reg_pending_uses
, i
);
1316 /* For each insn which shouldn't cross a call, add a dependence
1317 between that insn and this call insn. */
1318 add_dependence_list_and_free (insn
, &deps
->sched_before_next_call
,
1321 sched_analyze_insn (deps
, PATTERN (insn
), insn
, loop_notes
);
1324 /* In the absence of interprocedural alias analysis, we must flush
1325 all pending reads and writes, and start new dependencies starting
1326 from here. But only flush writes for constant calls (which may
1327 be passed a pointer to something we haven't written yet). */
1328 flush_pending_lists (deps
, insn
, true, !CONST_OR_PURE_CALL_P (insn
));
1330 /* Remember the last function call for limiting lifetimes. */
1331 free_INSN_LIST_list (&deps
->last_function_call
);
1332 deps
->last_function_call
= alloc_INSN_LIST (insn
, NULL_RTX
);
1334 /* Before reload, begin a post-call group, so as to keep the
1335 lifetimes of hard registers correct. */
1336 if (! reload_completed
)
1337 deps
->in_post_call_group_p
= post_call
;
          /* EH_REGION insn notes can not appear until well after we complete
             scheduling.  */
1343 gcc_assert (NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_BEG
1344 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_END
);
1346 /* See comments on reemit_notes as to why we do this.
1347 ??? Actually, the reemit_notes just say what is done, not why. */
1350 && (NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_BEG
1351 || NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_END
))
1353 loop_notes
= alloc_EXPR_LIST (REG_SAVE_NOTE
,
1354 GEN_INT (NOTE_LINE_NUMBER (insn
)),
1356 CONST_OR_PURE_CALL_P (loop_notes
) = CONST_OR_PURE_CALL_P (insn
);
1359 if (current_sched_info
->use_cselib
)
1360 cselib_process_insn (insn
);
      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         sequence.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
1373 if (!reload_completed
1374 /* Note we may have nested libcall sequences. We only care about
1375 the outermost libcall sequence. */
1376 && deps
->libcall_block_tail_insn
== 0
1377 /* The sequence must start with a clobber of a register. */
1378 && NONJUMP_INSN_P (insn
)
1379 && GET_CODE (PATTERN (insn
)) == CLOBBER
1380 && (r0
= XEXP (PATTERN (insn
), 0), REG_P (r0
))
1381 && REG_P (XEXP (PATTERN (insn
), 0))
1382 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1383 && (link
= find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
)) != 0
1384 && (end_seq
= XEXP (link
, 0)) != 0
1385 /* The insn referenced by the REG_LIBCALL note must be a
1386 simple nop copy with the same destination as the register
1387 mentioned in the clobber. */
1388 && (set
= single_set (end_seq
)) != 0
1389 && SET_DEST (set
) == r0
&& SET_SRC (set
) == r0
1390 /* And finally the insn referenced by the REG_LIBCALL must
1391 also contain a REG_EQUAL note and a REG_RETVAL note. */
1392 && find_reg_note (end_seq
, REG_EQUAL
, NULL_RTX
) != 0
1393 && find_reg_note (end_seq
, REG_RETVAL
, NULL_RTX
) != 0)
1394 deps
->libcall_block_tail_insn
= XEXP (link
, 0);
1396 /* If we have reached the end of a libcall block, then close the
1398 if (deps
->libcall_block_tail_insn
== insn
)
1399 deps
->libcall_block_tail_insn
= 0;
1403 if (current_sched_info
->use_cselib
)
/* The following function adds forward dependence (FROM, TO) with
   given DEP_TYPE.  The forward dependence should not exist before.  */

void
add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
{
  rtx new_link;

#ifdef ENABLE_CHECKING
  /* If add_dependence is working properly there should never
     be notes, deleted insns or duplicates in the backward
     links.  Thus we need not check for them here.

     However, if we have enabled checking we might as well go
     ahead and verify that add_dependence worked properly.  */
  gcc_assert (!NOTE_P (from));
  gcc_assert (!INSN_DELETED_P (from));
  if (forward_dependency_cache)
    gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
                               INSN_LUID (to)));
  else
    gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));

  /* ??? If bitmap_bit_p is a predicate, what is this supposed to do?  */
  if (forward_dependency_cache != NULL)
    bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
                  INSN_LUID (to));
#endif

  new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));

  PUT_REG_NOTE_KIND (new_link, dep_type);

  INSN_DEPEND (from) = new_link;
  INSN_DEP_COUNT (to) += 1;
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (rtx head, rtx tail)
{
  rtx insn, link;
  rtx next_tail;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
    }
}
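/* Usage sketch (added commentary): callers such as the region scheduler are
   expected to run the two phases in order: init_deps (&deps), then
   sched_analyze (&deps, head, tail) to build the backward LOG_LINKS, then
   compute_forward_dependences (head, tail) to turn them into INSN_DEPEND
   lists, and finally free_deps (&deps).  HEAD and TAIL delimit the insns of
   one block of the region.  */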
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (struct deps *deps)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (struct deps *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (int luid)
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
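  /* Worked example (added commentary): with, say, 10 basic blocks, the test
     below allocates the caches only once the function has more than
     10 * 100 * 5 = 5000 insn luids, so for typical functions the branch is
     skipped and add_dependence falls back to walking LOG_LINKS.  */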
  if (luid / n_basic_blocks > 100 * 5)
    {
      int i;

      true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
      anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
      output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
#ifdef ENABLE_CHECKING
      forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
#endif
      for (i = 0; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
          bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
        }
      cache_size = luid;
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches (void)
{
  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
#ifdef ENABLE_CHECKING
          bitmap_clear (&forward_dependency_cache[i]);
#endif
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}