gcc/sched-deps.c
1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GNU CC.
10 GNU CC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 2, or (at your option) any
13 later version.
15 GNU CC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "sched-int.h"
41 #include "params.h"
42 #include "cselib.h"
44 extern char *reg_known_equiv_p;
45 extern rtx *reg_known_value;
47 static regset_head reg_pending_sets_head;
48 static regset_head reg_pending_clobbers_head;
50 static regset reg_pending_sets;
51 static regset reg_pending_clobbers;
52 static int reg_pending_sets_all;
54 /* To speed up the test for duplicate dependency links we keep a
55 record of dependencies created by add_dependence when the average
56 number of instructions in a basic block is very large.
58 Studies have shown that there are typically around 5 instructions between
59 branches for typical C code. So we can make a guess that the average
60 basic block is approximately 5 instructions long; we will choose 100X
61 the average size as a very large basic block.
63 Each insn has associated bitmaps for its dependencies. Each bitmap
64 has enough entries to represent a dependency on any other insn in
65 the insn chain. The three caches are allocated together: if the
66 bitmaps for the true-dependency cache are allocated, so are the other two. */
67 static sbitmap *true_dependency_cache;
68 static sbitmap *anti_dependency_cache;
69 static sbitmap *output_dependency_cache;
71 /* To speed up consistency checking of the forward insn dependencies
72 we build, we use the following cache. Another possible solution
73 would be to switch off the check for duplicate insns in forward
74 dependencies. */
75 #ifdef ENABLE_CHECKING
76 static sbitmap *forward_dependency_cache;
77 #endif
79 static int deps_may_trap_p PARAMS ((rtx));
80 static void remove_dependence PARAMS ((rtx, rtx));
81 static void set_sched_group_p PARAMS ((rtx));
83 static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
84 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
85 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
86 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
87 static rtx group_leader PARAMS ((rtx));
89 static rtx get_condition PARAMS ((rtx));
90 static int conditions_mutex_p PARAMS ((rtx, rtx));
92 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
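/* Note: if the address is a pseudo whose equivalent value is known
   (reg_known_value, filled in by alias analysis), the known value is
   tested instead, since the pseudo may later be replaced by it.  */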
94 static int
95 deps_may_trap_p (mem)
96 rtx mem;
98 rtx addr = XEXP (mem, 0);
100 if (REG_P (addr)
101 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
102 && reg_known_value[REGNO (addr)])
103 addr = reg_known_value[REGNO (addr)];
104 return rtx_addr_can_trap_p (addr);
107 /* Return the INSN_LIST containing INSN in LIST, or NULL
108 if LIST does not contain INSN. */
110 HAIFA_INLINE rtx
111 find_insn_list (insn, list)
112 rtx insn;
113 rtx list;
115 while (list)
117 if (XEXP (list, 0) == insn)
118 return list;
119 list = XEXP (list, 1);
121 return 0;
124 /* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
125 otherwise. */
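/* The two lists are walked in parallel: the I-th element of LIST is an
   insn and the I-th element of LIST1 is the memory reference paired
   with it.  */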
127 HAIFA_INLINE int
128 find_insn_mem_list (insn, x, list, list1)
129 rtx insn, x;
130 rtx list, list1;
132 while (list)
134 if (XEXP (list, 0) == insn
135 && XEXP (list1, 0) == x)
136 return 1;
137 list = XEXP (list, 1);
138 list1 = XEXP (list1, 1);
140 return 0;
143 /* Find the condition under which INSN is executed. */
145 static rtx
146 get_condition (insn)
147 rtx insn;
149 rtx pat = PATTERN (insn);
150 rtx cond;
152 if (pat == 0)
153 return 0;
154 if (GET_CODE (pat) == COND_EXEC)
155 return COND_EXEC_TEST (pat);
156 if (GET_CODE (insn) != JUMP_INSN)
157 return 0;
158 if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
159 return 0;
160 if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
161 return 0;
162 pat = SET_DEST (pat);
163 cond = XEXP (pat, 0);
164 if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
165 && XEXP (cond, 2) == pc_rtx)
166 return cond;
167 else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
168 && XEXP (cond, 1) == pc_rtx)
169 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
170 XEXP (cond, 0), XEXP (cond, 1));
171 else
172 return 0;
175 /* Return nonzero if conditions COND1 and COND2 can never both be true. */
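/* For example, (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0))
   compare the same operands with reversed codes, so at most one of them
   can be true at any time.  */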
177 static int
178 conditions_mutex_p (cond1, cond2)
179 rtx cond1, cond2;
181 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
182 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
183 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
184 && XEXP (cond1, 0) == XEXP (cond2, 0)
185 && XEXP (cond1, 1) == XEXP (cond2, 1))
186 return 1;
187 return 0;
190 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
191 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
192 of dependence that this link represents. */
194 void
195 add_dependence (insn, elem, dep_type)
196 rtx insn;
197 rtx elem;
198 enum reg_note dep_type;
200 rtx link, next;
201 int present_p;
202 rtx cond1, cond2;
204 /* Don't depend an insn on itself. */
205 if (insn == elem)
206 return;
208 /* We can get a dependency on deleted insns due to optimizations in
209 the register allocation and reloading or due to splitting. Any
210 such dependency is useless and can be ignored. */
211 if (GET_CODE (elem) == NOTE)
212 return;
214 /* flow.c doesn't handle conditional lifetimes entirely correctly;
215 calls mess up the conditional lifetimes. */
216 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
218 cond1 = get_condition (insn);
219 cond2 = get_condition (elem);
220 if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
221 return;
224 /* If elem is part of a sequence that must be scheduled together, then
225 make the dependence point to the last insn of the sequence.
226 When HAVE_cc0, it is possible for NOTEs to exist between users and
227 setters of the condition codes, so we must skip past notes here.
228 Otherwise, NOTEs are impossible here. */
229 next = next_nonnote_insn (elem);
230 if (next && SCHED_GROUP_P (next)
231 && GET_CODE (next) != CODE_LABEL)
233 /* Notes will never intervene here though, so don't bother checking
234 for them. */
235 /* Hah! Wrong. */
236 /* We must reject CODE_LABELs, so that we don't get confused by one
237 that has LABEL_PRESERVE_P set, which is represented by the same
238 bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
239 SCHED_GROUP_P. */
241 rtx nnext;
242 while ((nnext = next_nonnote_insn (next)) != NULL
243 && SCHED_GROUP_P (nnext)
244 && GET_CODE (nnext) != CODE_LABEL)
245 next = nnext;
247 /* Again, don't depend an insn on itself. */
248 if (insn == next)
249 return;
251 /* Make the dependence to NEXT, the last insn of the group, instead
252 of the original ELEM. */
253 elem = next;
256 present_p = 1;
257 #ifdef INSN_SCHEDULING
258 /* ??? No good way to tell from here whether we're doing interblock
259 scheduling. Possibly add another callback. */
260 #if 0
261 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
262 No need for interblock dependences with calls, since
263 calls are not moved between blocks. Note: the edge where
264 elem is a CALL is still required. */
265 if (GET_CODE (insn) == CALL_INSN
266 && (INSN_BB (elem) != INSN_BB (insn)))
267 return;
268 #endif
270 /* If we already have a dependency for ELEM, then we do not need to
271 do anything. Avoiding the list walk below can cut compile times
272 dramatically for some code. */
273 if (true_dependency_cache != NULL)
275 enum reg_note present_dep_type = 0;
277 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
278 abort ();
279 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
280 /* Do nothing (present_dep_type is already 0). */
282 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
283 INSN_LUID (elem)))
284 present_dep_type = REG_DEP_ANTI;
285 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
286 INSN_LUID (elem)))
287 present_dep_type = REG_DEP_OUTPUT;
288 else
289 present_p = 0;
290 if (present_p && (int) dep_type >= (int) present_dep_type)
291 return;
293 #endif
295 /* Check that we don't already have this dependence. */
296 if (present_p)
297 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
298 if (XEXP (link, 0) == elem)
300 #ifdef INSN_SCHEDULING
301 /* Clear corresponding cache entry because type of the link
302 may be changed. */
303 if (true_dependency_cache != NULL)
305 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
306 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
307 INSN_LUID (elem));
308 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
309 && output_dependency_cache)
310 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
311 INSN_LUID (elem));
312 else
313 abort ();
315 #endif
317 /* If this is a more restrictive type of dependence than the existing
318 one, then change the existing dependence to this type. */
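/* Reg-note kinds are ordered so that a numerically smaller kind is more
   restrictive; kind 0 (a plain data dependence) therefore overrides
   REG_DEP_OUTPUT and REG_DEP_ANTI.  */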
319 if ((int) dep_type < (int) REG_NOTE_KIND (link))
320 PUT_REG_NOTE_KIND (link, dep_type);
322 #ifdef INSN_SCHEDULING
323 /* If we are adding a dependency to INSN's LOG_LINKs, then
324 note that in the bitmap caches of dependency information. */
325 if (true_dependency_cache != NULL)
327 if ((int)REG_NOTE_KIND (link) == 0)
328 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
329 INSN_LUID (elem));
330 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
331 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
332 INSN_LUID (elem));
333 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
334 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
335 INSN_LUID (elem));
337 #endif
338 return;
340 /* Might want to check one level of transitivity to save conses. */
342 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
343 LOG_LINKS (insn) = link;
345 /* Insn dependency, not data dependency. */
346 PUT_REG_NOTE_KIND (link, dep_type);
348 #ifdef INSN_SCHEDULING
349 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
350 in the bitmap caches of dependency information. */
351 if (true_dependency_cache != NULL)
353 if ((int)dep_type == 0)
354 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
355 else if (dep_type == REG_DEP_ANTI)
356 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
357 else if (dep_type == REG_DEP_OUTPUT)
358 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
360 #endif
363 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
364 of INSN. Abort if not found. */
366 static void
367 remove_dependence (insn, elem)
368 rtx insn;
369 rtx elem;
371 rtx prev, link, next;
372 int found = 0;
374 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
376 next = XEXP (link, 1);
377 if (XEXP (link, 0) == elem)
379 if (prev)
380 XEXP (prev, 1) = next;
381 else
382 LOG_LINKS (insn) = next;
384 #ifdef INSN_SCHEDULING
385 /* If we are removing a dependency from the LOG_LINKS list,
386 make sure to remove it from the cache too. */
387 if (true_dependency_cache != NULL)
389 if (REG_NOTE_KIND (link) == 0)
390 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
391 INSN_LUID (elem));
392 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
393 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
394 INSN_LUID (elem));
395 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
396 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
397 INSN_LUID (elem));
399 #endif
401 free_INSN_LIST_node (link);
403 found = 1;
405 else
406 prev = link;
409 if (!found)
410 abort ();
411 return;
414 /* Return an insn which represents a SCHED_GROUP, which is
415 the last insn in the group. */
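/* Starting from INSN, walk forward over consecutive insns that have
   SCHED_GROUP_P set (skipping notes) and return the last one reached;
   if the insn after INSN is not part of the group, INSN itself is
   returned.  */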
417 static rtx
418 group_leader (insn)
419 rtx insn;
421 rtx prev;
425 prev = insn;
426 insn = next_nonnote_insn (insn);
428 while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
430 return prev;
433 /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
434 goes along with that. */
436 static void
437 set_sched_group_p (insn)
438 rtx insn;
440 rtx link, prev;
442 SCHED_GROUP_P (insn) = 1;
444 /* There may be a note before this insn now, but all notes will
445 be removed before we actually try to schedule the insns, so
446 it won't cause a problem later. We must avoid it here though. */
447 prev = prev_nonnote_insn (insn);
449 /* Make a copy of all dependencies on the immediately previous insn,
450 and add to this insn. This is so that all the dependencies will
451 apply to the group. Remove an explicit dependence on this insn
452 as SCHED_GROUP_P now represents it. */
454 if (find_insn_list (prev, LOG_LINKS (insn)))
455 remove_dependence (insn, prev);
457 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
458 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
461 /* Process an insn's memory dependencies. There are four kinds of
462 dependencies:
464 (0) read dependence: read follows read
465 (1) true dependence: read follows write
466 (2) anti dependence: write follows read
467 (3) output dependence: write follows write
469 We are careful to build only dependencies which actually exist, and
470 use transitivity to avoid building too many links. */
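/* A small illustration (hypothetical, not taken from this file), using a
   single memory location A:
       r1 = A     -- read
       r2 = A     -- read dependence on the first read
       A  = r3    -- anti dependence on both reads
       A  = r4    -- output dependence on the write of r3
       r5 = A     -- true dependence on the write of r4  */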
472 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
473 The MEM is a memory reference contained within INSN, which we are saving
474 so that we can do memory aliasing on it. */
476 void
477 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
478 struct deps *deps;
479 rtx *insn_list, *mem_list, insn, mem;
481 register rtx link;
483 link = alloc_INSN_LIST (insn, *insn_list);
484 *insn_list = link;
486 if (current_sched_info->use_cselib)
488 mem = shallow_copy_rtx (mem);
489 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
491 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
492 *mem_list = link;
494 deps->pending_lists_length++;
497 /* Make a dependency between every memory reference on the pending lists
498 and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
499 the read list. */
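/* Every insn on the pending lists becomes anti-dependent on INSN (the
   read list is skipped when ONLY_WRITE); afterwards INSN is left as the
   sole entry on last_pending_memory_flush, so later memory references
   will depend on it.  */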
501 static void
502 flush_pending_lists (deps, insn, only_write)
503 struct deps *deps;
504 rtx insn;
505 int only_write;
507 rtx u;
508 rtx link;
510 while (deps->pending_read_insns && ! only_write)
512 add_dependence (insn, XEXP (deps->pending_read_insns, 0),
513 REG_DEP_ANTI);
515 link = deps->pending_read_insns;
516 deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
517 free_INSN_LIST_node (link);
519 link = deps->pending_read_mems;
520 deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
521 free_EXPR_LIST_node (link);
523 while (deps->pending_write_insns)
525 add_dependence (insn, XEXP (deps->pending_write_insns, 0),
526 REG_DEP_ANTI);
528 link = deps->pending_write_insns;
529 deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
530 free_INSN_LIST_node (link);
532 link = deps->pending_write_mems;
533 deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
534 free_EXPR_LIST_node (link);
536 deps->pending_lists_length = 0;
538 /* last_pending_memory_flush is now a list of insns. */
539 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
540 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
542 free_INSN_LIST_list (&deps->last_pending_memory_flush);
543 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
544 deps->pending_flush_length = 1;
547 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
548 rtx, X, creating all dependencies generated by the write to the
549 destination of X, and reads of everything mentioned. */
551 static void
552 sched_analyze_1 (deps, x, insn)
553 struct deps *deps;
554 rtx x;
555 rtx insn;
557 register int regno;
558 register rtx dest = XEXP (x, 0);
559 enum rtx_code code = GET_CODE (x);
561 if (dest == 0)
562 return;
564 if (GET_CODE (dest) == PARALLEL)
566 register int i;
568 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
569 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
570 sched_analyze_1 (deps,
571 gen_rtx_CLOBBER (VOIDmode,
572 XEXP (XVECEXP (dest, 0, i), 0)),
573 insn);
575 if (GET_CODE (x) == SET)
576 sched_analyze_2 (deps, SET_SRC (x), insn);
577 return;
580 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
581 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
583 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
585 /* The second and third arguments are values read by this insn. */
586 sched_analyze_2 (deps, XEXP (dest, 1), insn);
587 sched_analyze_2 (deps, XEXP (dest, 2), insn);
589 dest = XEXP (dest, 0);
592 if (GET_CODE (dest) == REG)
594 register int i;
596 regno = REGNO (dest);
598 /* A hard reg in a wide mode may really be multiple registers.
599 If so, mark all of them just like the first. */
600 if (regno < FIRST_PSEUDO_REGISTER)
602 i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
603 while (--i >= 0)
605 int r = regno + i;
606 rtx u;
608 for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
609 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
611 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
612 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
614 /* Clobbers need not be ordered with respect to one
615 another, but sets must be ordered with respect to a
616 pending clobber. */
617 if (code == SET)
619 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
620 free_INSN_LIST_list (&deps->reg_last[r].uses);
621 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
622 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
623 SET_REGNO_REG_SET (reg_pending_sets, r);
625 else
626 SET_REGNO_REG_SET (reg_pending_clobbers, r);
628 /* Function calls clobber all call_used regs. */
629 if (global_regs[r]
630 || (code == SET
631 && TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
632 for (u = deps->last_function_call; u; u = XEXP (u, 1))
633 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
636 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
637 it does not reload. Ignore these as they have served their
638 purpose already. */
639 else if (regno >= deps->max_reg)
641 if (GET_CODE (PATTERN (insn)) != USE
642 && GET_CODE (PATTERN (insn)) != CLOBBER)
643 abort ();
645 else
647 rtx u;
649 for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
650 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
652 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
653 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
655 if (code == SET)
657 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
658 free_INSN_LIST_list (&deps->reg_last[regno].uses);
659 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
660 add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
661 SET_REGNO_REG_SET (reg_pending_sets, regno);
663 else
664 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
666 /* Pseudos that are REG_EQUIV to something may be replaced
667 by that during reloading. We need only add dependencies for
668 the address in the REG_EQUIV note. */
669 if (!reload_completed
670 && reg_known_equiv_p[regno]
671 && GET_CODE (reg_known_value[regno]) == MEM)
672 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
674 /* Don't let it cross a call after scheduling if it doesn't
675 already cross one. */
677 if (REG_N_CALLS_CROSSED (regno) == 0)
678 for (u = deps->last_function_call; u; u = XEXP (u, 1))
679 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
682 else if (GET_CODE (dest) == MEM)
684 /* Writing memory. */
685 rtx t = dest;
687 if (current_sched_info->use_cselib)
689 t = shallow_copy_rtx (dest);
690 cselib_lookup (XEXP (t, 0), Pmode, 1);
691 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
694 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
696 /* Flush all pending reads and writes to prevent the pending lists
697 from getting any larger. Insn scheduling runs too slowly when
698 these lists get long. When compiling GCC with itself,
699 this flush occurs 8 times for sparc, and 10 times for m88k using
700 the default value of 32. */
701 flush_pending_lists (deps, insn, 0);
703 else
705 rtx u;
706 rtx pending, pending_mem;
708 pending = deps->pending_read_insns;
709 pending_mem = deps->pending_read_mems;
710 while (pending)
712 if (anti_dependence (XEXP (pending_mem, 0), t))
713 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
715 pending = XEXP (pending, 1);
716 pending_mem = XEXP (pending_mem, 1);
719 pending = deps->pending_write_insns;
720 pending_mem = deps->pending_write_mems;
721 while (pending)
723 if (output_dependence (XEXP (pending_mem, 0), t))
724 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
726 pending = XEXP (pending, 1);
727 pending_mem = XEXP (pending_mem, 1);
730 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
731 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
733 add_insn_mem_dependence (deps, &deps->pending_write_insns,
734 &deps->pending_write_mems, insn, dest);
736 sched_analyze_2 (deps, XEXP (dest, 0), insn);
739 /* Analyze reads. */
740 if (GET_CODE (x) == SET)
741 sched_analyze_2 (deps, SET_SRC (x), insn);
744 /* Analyze the uses of memory and registers in rtx X in INSN. */
746 static void
747 sched_analyze_2 (deps, x, insn)
748 struct deps *deps;
749 rtx x;
750 rtx insn;
752 register int i;
753 register int j;
754 register enum rtx_code code;
755 register const char *fmt;
757 if (x == 0)
758 return;
760 code = GET_CODE (x);
762 switch (code)
764 case CONST_INT:
765 case CONST_DOUBLE:
766 case SYMBOL_REF:
767 case CONST:
768 case LABEL_REF:
769 /* Ignore constants. Note that we must handle CONST_DOUBLE here
770 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
771 this does not mean that this insn is using cc0. */
772 return;
774 #ifdef HAVE_cc0
775 case CC0:
776 /* User of CC0 depends on immediately preceding insn. */
777 set_sched_group_p (insn);
778 return;
779 #endif
781 case REG:
783 rtx u;
784 int regno = REGNO (x);
785 if (regno < FIRST_PSEUDO_REGISTER)
787 int i;
789 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
790 while (--i >= 0)
792 int r = regno + i;
793 deps->reg_last[r].uses
794 = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
795 SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
797 for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
798 add_dependence (insn, XEXP (u, 0), 0);
800 /* ??? This should never happen. */
801 for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
802 add_dependence (insn, XEXP (u, 0), 0);
804 if (call_used_regs[r] || global_regs[r])
805 /* Function calls clobber all call_used regs. */
806 for (u = deps->last_function_call; u; u = XEXP (u, 1))
807 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
810 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
811 it does not reload. Ignore these as they have served their
812 purpose already. */
813 else if (regno >= deps->max_reg)
815 if (GET_CODE (PATTERN (insn)) != USE
816 && GET_CODE (PATTERN (insn)) != CLOBBER)
817 abort ();
819 else
821 deps->reg_last[regno].uses
822 = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
823 SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
825 for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
826 add_dependence (insn, XEXP (u, 0), 0);
828 /* ??? This should never happen. */
829 for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
830 add_dependence (insn, XEXP (u, 0), 0);
832 /* Pseudos that are REG_EQUIV to something may be replaced
833 by that during reloading. We need only add dependencies for
834 the address in the REG_EQUIV note. */
835 if (!reload_completed
836 && reg_known_equiv_p[regno]
837 && GET_CODE (reg_known_value[regno]) == MEM)
838 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
840 /* If the register does not already cross any calls, then add this
841 insn to the sched_before_next_call list so that it will still
842 not cross calls after scheduling. */
843 if (REG_N_CALLS_CROSSED (regno) == 0)
844 add_dependence (deps->sched_before_next_call, insn,
845 REG_DEP_ANTI);
847 return;
850 case MEM:
852 /* Reading memory. */
853 rtx u;
854 rtx pending, pending_mem;
855 rtx t = x;
857 if (current_sched_info->use_cselib)
859 t = shallow_copy_rtx (t);
860 cselib_lookup (XEXP (t, 0), Pmode, 1);
861 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
863 pending = deps->pending_read_insns;
864 pending_mem = deps->pending_read_mems;
865 while (pending)
867 if (read_dependence (XEXP (pending_mem, 0), t))
868 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
870 pending = XEXP (pending, 1);
871 pending_mem = XEXP (pending_mem, 1);
874 pending = deps->pending_write_insns;
875 pending_mem = deps->pending_write_mems;
876 while (pending)
878 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
879 t, rtx_varies_p))
880 add_dependence (insn, XEXP (pending, 0), 0);
882 pending = XEXP (pending, 1);
883 pending_mem = XEXP (pending_mem, 1);
886 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
887 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
888 || deps_may_trap_p (x))
889 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
891 /* Always add these dependencies to pending_reads, since
892 this insn may be followed by a write. */
893 add_insn_mem_dependence (deps, &deps->pending_read_insns,
894 &deps->pending_read_mems, insn, x);
896 /* Take advantage of tail recursion here. */
897 sched_analyze_2 (deps, XEXP (x, 0), insn);
898 return;
901 /* Force pending stores to memory in case a trap handler needs them. */
902 case TRAP_IF:
903 flush_pending_lists (deps, insn, 1);
904 break;
906 case ASM_OPERANDS:
907 case ASM_INPUT:
908 case UNSPEC_VOLATILE:
910 rtx u;
912 /* Traditional and volatile asm instructions must be considered to use
913 and clobber all hard registers, all pseudo-registers and all of
914 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
916 Consider for instance a volatile asm that changes the fpu rounding
917 mode. An insn should not be moved across this even if it only uses
918 pseudo-regs because it might give an incorrectly rounded result. */
919 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
921 for (i = 0; i < deps->max_reg; i++)
923 struct deps_reg *reg_last = &deps->reg_last[i];
925 for (u = reg_last->uses; u; u = XEXP (u, 1))
926 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
927 for (u = reg_last->sets; u; u = XEXP (u, 1))
928 add_dependence (insn, XEXP (u, 0), 0);
929 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
930 add_dependence (insn, XEXP (u, 0), 0);
932 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
933 free_INSN_LIST_list (&reg_last->uses);
935 reg_pending_sets_all = 1;
937 flush_pending_lists (deps, insn, 0);
940 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
941 We cannot just fall through here since then we would be confused
942 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
943 a traditional asm, unlike its normal usage. */
945 if (code == ASM_OPERANDS)
947 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
948 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
949 return;
951 break;
954 case PRE_DEC:
955 case POST_DEC:
956 case PRE_INC:
957 case POST_INC:
958 /* These both read and modify the result. We must handle them as writes
959 to get proper dependencies for following instructions. We must handle
960 them as reads to get proper dependencies from this to previous
961 instructions. Thus we need to pass them to both sched_analyze_1
962 and sched_analyze_2. We must call sched_analyze_2 first in order
963 to get the proper antecedent for the read. */
964 sched_analyze_2 (deps, XEXP (x, 0), insn);
965 sched_analyze_1 (deps, x, insn);
966 return;
968 case POST_MODIFY:
969 case PRE_MODIFY:
970 /* op0 = op0 + op1 */
971 sched_analyze_2 (deps, XEXP (x, 0), insn);
972 sched_analyze_2 (deps, XEXP (x, 1), insn);
973 sched_analyze_1 (deps, x, insn);
974 return;
976 default:
977 break;
980 /* Other cases: walk the insn. */
981 fmt = GET_RTX_FORMAT (code);
982 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
984 if (fmt[i] == 'e')
985 sched_analyze_2 (deps, XEXP (x, i), insn);
986 else if (fmt[i] == 'E')
987 for (j = 0; j < XVECLEN (x, i); j++)
988 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
992 /* Analyze an INSN with pattern X to find all dependencies. */
994 static void
995 sched_analyze_insn (deps, x, insn, loop_notes)
996 struct deps *deps;
997 rtx x, insn;
998 rtx loop_notes;
1000 register RTX_CODE code = GET_CODE (x);
1001 int schedule_barrier_found = 0;
1002 rtx link;
1003 int i;
1005 if (code == COND_EXEC)
1007 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
1009 /* ??? Should be recording conditions so we reduce the number of
1010 false dependencies. */
1011 x = COND_EXEC_CODE (x);
1012 code = GET_CODE (x);
1014 if (code == SET || code == CLOBBER)
1015 sched_analyze_1 (deps, x, insn);
1016 else if (code == PARALLEL)
1018 register int i;
1019 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1021 rtx sub = XVECEXP (x, 0, i);
1022 code = GET_CODE (sub);
1024 if (code == COND_EXEC)
1026 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
1027 sub = COND_EXEC_CODE (sub);
1028 code = GET_CODE (sub);
1030 if (code == SET || code == CLOBBER)
1031 sched_analyze_1 (deps, sub, insn);
1032 else
1033 sched_analyze_2 (deps, sub, insn);
1036 else
1037 sched_analyze_2 (deps, x, insn);
1039 /* Mark registers CLOBBERED or used by called function. */
1040 if (GET_CODE (insn) == CALL_INSN)
1042 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1044 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1045 sched_analyze_1 (deps, XEXP (link, 0), insn);
1046 else
1047 sched_analyze_2 (deps, XEXP (link, 0), insn);
1049 if (find_reg_note (insn, REG_SETJMP, NULL))
1050 schedule_barrier_found = 1;
1053 if (GET_CODE (insn) == JUMP_INSN)
1055 rtx next;
1056 next = next_nonnote_insn (insn);
1057 if (next && GET_CODE (next) == BARRIER)
1058 schedule_barrier_found = 1;
1059 else
1061 rtx pending, pending_mem, u;
1062 regset_head tmp;
1063 INIT_REG_SET (&tmp);
1065 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
1066 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
1068 struct deps_reg *reg_last = &deps->reg_last[i];
1069 for (u = reg_last->sets; u; u = XEXP (u, 1))
1070 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1071 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1072 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1075 CLEAR_REG_SET (&tmp);
1077 /* All memory writes and volatile reads must happen before the
1078 jump. Non-volatile reads must happen before the jump iff the
1079 result is needed by the register use set computed above. */
1081 pending = deps->pending_write_insns;
1082 pending_mem = deps->pending_write_mems;
1083 while (pending)
1085 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1086 pending = XEXP (pending, 1);
1087 pending_mem = XEXP (pending_mem, 1);
1090 pending = deps->pending_read_insns;
1091 pending_mem = deps->pending_read_mems;
1092 while (pending)
1094 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1095 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1096 pending = XEXP (pending, 1);
1097 pending_mem = XEXP (pending_mem, 1);
1100 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
1101 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1105 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1106 block, then we must be sure that no instructions are scheduled across it.
1107 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1108 become incorrect. */
1109 if (loop_notes)
1111 rtx link;
1113 /* Update loop_notes with any notes from this insn. Also determine
1114 if any of the notes on the list correspond to instruction scheduling
1115 barriers (loop, eh & setjmp notes, but not range notes). */
1116 link = loop_notes;
1117 while (XEXP (link, 1))
1119 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1120 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1121 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1122 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
1123 schedule_barrier_found = 1;
1125 link = XEXP (link, 1);
1127 XEXP (link, 1) = REG_NOTES (insn);
1128 REG_NOTES (insn) = loop_notes;
1131 /* If this instruction can throw an exception, then moving it changes
1132 where block boundaries fall. This is mighty confusing elsewhere.
1133 Therefore, prevent such an instruction from being moved. */
1134 if (flag_non_call_exceptions && can_throw_internal (insn))
1135 schedule_barrier_found = 1;
1137 /* Add dependencies if a scheduling barrier was found. */
1138 if (schedule_barrier_found)
1140 rtx u;
1142 for (i = 0; i < deps->max_reg; i++)
1144 struct deps_reg *reg_last = &deps->reg_last[i];
1146 for (u = reg_last->uses; u; u = XEXP (u, 1))
1147 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1148 for (u = reg_last->sets; u; u = XEXP (u, 1))
1149 add_dependence (insn, XEXP (u, 0), 0);
1150 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1151 add_dependence (insn, XEXP (u, 0), 0);
1153 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1154 free_INSN_LIST_list (&reg_last->uses);
1156 flush_pending_lists (deps, insn, 0);
1158 reg_pending_sets_all = 1;
1161 /* Accumulate clobbers until the next set so that it will be output
1162 dependent on all of them. At the next set we can clear the clobber
1163 list, since subsequent sets will be output dependent on it. */
1164 if (reg_pending_sets_all)
1166 reg_pending_sets_all = 0;
1167 for (i = 0; i < deps->max_reg; i++)
1169 struct deps_reg *reg_last = &deps->reg_last[i];
1170 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1172 free_INSN_LIST_list (&reg_last->sets);
1173 free_INSN_LIST_list (&reg_last->clobbers);
1175 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1176 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1179 else
1181 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1183 struct deps_reg *reg_last = &deps->reg_last[i];
1184 if (GET_CODE (PATTERN (insn)) != COND_EXEC)
1186 free_INSN_LIST_list (&reg_last->sets);
1187 free_INSN_LIST_list (&reg_last->clobbers);
1189 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1190 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1192 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1194 struct deps_reg *reg_last = &deps->reg_last[i];
1195 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1196 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1199 CLEAR_REG_SET (reg_pending_sets);
1200 CLEAR_REG_SET (reg_pending_clobbers);
1202 /* If a post-call group is still open, see if it should remain so.
1203 This insn must be a simple move of a hard reg to a pseudo or
1204 vice-versa.
1206 We must avoid moving these insns for correctness on
1207 SMALL_REGISTER_CLASS machines, and for special registers like
1208 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1209 hard regs for all targets. */
1211 if (deps->in_post_call_group_p)
1213 rtx tmp, set = single_set (insn);
1214 int src_regno, dest_regno;
1216 if (set == NULL)
1217 goto end_call_group;
1219 tmp = SET_DEST (set);
1220 if (GET_CODE (tmp) == SUBREG)
1221 tmp = SUBREG_REG (tmp);
1222 if (GET_CODE (tmp) == REG)
1223 dest_regno = REGNO (tmp);
1224 else
1225 goto end_call_group;
1227 tmp = SET_SRC (set);
1228 if (GET_CODE (tmp) == SUBREG)
1229 tmp = SUBREG_REG (tmp);
1230 if (GET_CODE (tmp) == REG)
1231 src_regno = REGNO (tmp);
1232 else
1233 goto end_call_group;
1235 if (src_regno < FIRST_PSEUDO_REGISTER
1236 || dest_regno < FIRST_PSEUDO_REGISTER)
1238 set_sched_group_p (insn);
1239 CANT_MOVE (insn) = 1;
1241 else
1243 end_call_group:
1244 deps->in_post_call_group_p = 0;
1249 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1250 for every dependency. */
1252 void
1253 sched_analyze (deps, head, tail)
1254 struct deps *deps;
1255 rtx head, tail;
1257 register rtx insn;
1258 register rtx u;
1259 rtx loop_notes = 0;
1261 if (current_sched_info->use_cselib)
1262 cselib_init ();
1264 for (insn = head;; insn = NEXT_INSN (insn))
1266 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1268 /* Clear out the stale LOG_LINKS from flow. */
1269 free_INSN_LIST_list (&LOG_LINKS (insn));
1271 /* Clear out stale SCHED_GROUP_P. */
1272 SCHED_GROUP_P (insn) = 0;
1274 /* Make each JUMP_INSN a scheduling barrier for memory
1275 references. */
1276 if (GET_CODE (insn) == JUMP_INSN)
1278 /* Keep the list a reasonable size. */
1279 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1280 flush_pending_lists (deps, insn, 0);
1281 else
1282 deps->last_pending_memory_flush
1283 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1285 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1286 loop_notes = 0;
1288 else if (GET_CODE (insn) == CALL_INSN)
1290 rtx x;
1291 register int i;
1293 /* Clear out stale SCHED_GROUP_P. */
1294 SCHED_GROUP_P (insn) = 0;
1296 CANT_MOVE (insn) = 1;
1298 /* Clear out the stale LOG_LINKS from flow. */
1299 free_INSN_LIST_list (&LOG_LINKS (insn));
1301 /* Any instruction using a hard register which may get clobbered
1302 by a call needs to be marked as dependent on this call.
1303 This prevents a use of a hard return reg from being moved
1304 past a void call (i.e. it does not explicitly set the hard
1305 return reg). */
1307 /* If this call has REG_SETJMP, then assume that
1308 all registers, not just hard registers, may be clobbered by this
1309 call. */
1311 /* Insn, being a CALL_INSN, magically depends on
1312 `last_function_call' already. */
1314 if (find_reg_note (insn, REG_SETJMP, NULL))
1316 for (i = 0; i < deps->max_reg; i++)
1318 struct deps_reg *reg_last = &deps->reg_last[i];
1320 for (u = reg_last->uses; u; u = XEXP (u, 1))
1321 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1322 for (u = reg_last->sets; u; u = XEXP (u, 1))
1323 add_dependence (insn, XEXP (u, 0), 0);
1324 for (u = reg_last->clobbers; u; u = XEXP (u, 1))
1325 add_dependence (insn, XEXP (u, 0), 0);
1327 free_INSN_LIST_list (&reg_last->uses);
1329 reg_pending_sets_all = 1;
1331 else
1333 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1334 if (call_used_regs[i] || global_regs[i])
1336 for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
1337 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1338 for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
1339 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1341 SET_REGNO_REG_SET (reg_pending_clobbers, i);
1345 /* For each insn which shouldn't cross a call, add a dependence
1346 between that insn and this call insn. */
1347 x = LOG_LINKS (deps->sched_before_next_call);
1348 while (x)
1350 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1351 x = XEXP (x, 1);
1353 free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
1355 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1356 loop_notes = 0;
1358 /* In the absence of interprocedural alias analysis, we must flush
1359 all pending reads and writes, and start new dependencies
1360 from here. But only flush writes for constant calls (which may
1361 be passed a pointer to something we haven't written yet). */
1362 flush_pending_lists (deps, insn, CONST_OR_PURE_CALL_P (insn));
1364 /* Depend this function call (actually, the user of this
1365 function call) on all hard register clobberage. */
1367 /* last_function_call is now a list of insns. */
1368 free_INSN_LIST_list (&deps->last_function_call);
1369 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1371 /* Before reload, begin a post-call group, so as to keep the
1372 lifetimes of hard registers correct. */
1373 if (! reload_completed)
1374 deps->in_post_call_group_p = 1;
1377 /* See comments on reemit_notes as to why we do this.
1378 ??? Actually, the reemit_notes just say what is done, not why. */
1380 else if (GET_CODE (insn) == NOTE
1381 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
1382 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
1384 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
1385 loop_notes);
1386 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1387 GEN_INT (NOTE_LINE_NUMBER (insn)),
1388 loop_notes);
1390 else if (GET_CODE (insn) == NOTE
1391 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1392 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1393 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1394 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
1396 rtx rtx_region;
1398 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1399 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1400 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1401 else
1402 rtx_region = GEN_INT (0);
1404 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1405 rtx_region,
1406 loop_notes);
1407 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1408 GEN_INT (NOTE_LINE_NUMBER (insn)),
1409 loop_notes);
1410 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1413 if (current_sched_info->use_cselib)
1414 cselib_process_insn (insn);
1415 if (insn == tail)
1417 if (current_sched_info->use_cselib)
1418 cselib_finish ();
1419 return;
1422 abort ();
1425 /* Examine insns in the range [ HEAD, TAIL ] and use the backward
1426 dependences from LOG_LINKS to build forward dependences in
1427 INSN_DEPEND. */
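/* LOG_LINKS list, for each insn, the insns it depends on (backward
   dependences); INSN_DEPEND is the reverse mapping, listing the insns
   that depend on it.  */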
1429 void
1430 compute_forward_dependences (head, tail)
1431 rtx head, tail;
1433 rtx insn, link;
1434 rtx next_tail;
1435 enum reg_note dep_type;
1437 next_tail = NEXT_INSN (tail);
1438 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1440 if (! INSN_P (insn))
1441 continue;
1443 insn = group_leader (insn);
1445 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1447 rtx x = group_leader (XEXP (link, 0));
1448 rtx new_link;
1450 if (x != XEXP (link, 0))
1451 continue;
1453 #ifdef ENABLE_CHECKING
1454 /* If add_dependence is working properly there should never
1455 be notes, deleted insns or duplicates in the backward
1456 links. Thus we need not check for them here.
1458 However, if we have enabled checking we might as well go
1459 ahead and verify that add_dependence worked properly. */
1460 if (GET_CODE (x) == NOTE
1461 || INSN_DELETED_P (x)
1462 || (forward_dependency_cache != NULL
1463 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1464 INSN_LUID (insn)))
1465 || (forward_dependency_cache == NULL
1466 && find_insn_list (insn, INSN_DEPEND (x))))
1467 abort ();
1468 if (forward_dependency_cache != NULL)
1469 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1470 INSN_LUID (insn));
1471 #endif
1473 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1475 dep_type = REG_NOTE_KIND (link);
1476 PUT_REG_NOTE_KIND (new_link, dep_type);
1478 INSN_DEPEND (x) = new_link;
1479 INSN_DEP_COUNT (insn) += 1;
1484 /* Initialize variables for region data dependence analysis.
1485 n_bbs is the number of region blocks. */
1487 void
1488 init_deps (deps)
1489 struct deps *deps;
1491 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1493 deps->max_reg = max_reg;
1494 deps->reg_last = (struct deps_reg *)
1495 xcalloc (max_reg, sizeof (struct deps_reg));
1496 INIT_REG_SET (&deps->reg_last_in_use);
1498 deps->pending_read_insns = 0;
1499 deps->pending_read_mems = 0;
1500 deps->pending_write_insns = 0;
1501 deps->pending_write_mems = 0;
1502 deps->pending_lists_length = 0;
1503 deps->pending_flush_length = 0;
1504 deps->last_pending_memory_flush = 0;
1505 deps->last_function_call = 0;
1506 deps->in_post_call_group_p = 0;
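/* sched_before_next_call is a dummy insn whose LOG_LINKS accumulate
   insns that must not be moved past a call; when the next CALL_INSN is
   analyzed, it is made dependent on each of them (see sched_analyze).  */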
1508 deps->sched_before_next_call
1509 = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
1510 NULL_RTX, 0, NULL_RTX, NULL_RTX);
1511 LOG_LINKS (deps->sched_before_next_call) = 0;
1514 /* Free insn lists found in DEPS. */
1516 void
1517 free_deps (deps)
1518 struct deps *deps;
1520 int i;
1522 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1523 times. For a test case with 42000 regs and 8000 small basic blocks,
1524 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1525 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1527 struct deps_reg *reg_last = &deps->reg_last[i];
1528 free_INSN_LIST_list (&reg_last->uses);
1529 free_INSN_LIST_list (&reg_last->sets);
1530 free_INSN_LIST_list (&reg_last->clobbers);
1532 CLEAR_REG_SET (&deps->reg_last_in_use);
1534 free (deps->reg_last);
1535 deps->reg_last = NULL;
1538 /* If it is profitable to use them, initialize caches for tracking
1539 dependency information. LUID is the number of insns to be scheduled;
1540 it is used in the estimate of profitability. */
1542 void
1543 init_dependency_caches (luid)
1544 int luid;
1546 /* ?!? We could save some memory by computing a per-region luid mapping
1547 which could reduce both the number of vectors in the cache and the size
1548 of each vector. Instead we just avoid the cache entirely unless the
1549 average number of instructions in a basic block is very high. See
1550 the comment before the declaration of true_dependency_cache for
1551 what we consider "very high". */
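/* With the estimate of roughly 5 insns per basic block used above, this
   threshold corresponds to an average block size of about 500 insns
   (100 * 5).  */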
1552 if (luid / n_basic_blocks > 100 * 5)
1554 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1555 sbitmap_vector_zero (true_dependency_cache, luid);
1556 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1557 sbitmap_vector_zero (anti_dependency_cache, luid);
1558 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1559 sbitmap_vector_zero (output_dependency_cache, luid);
1560 #ifdef ENABLE_CHECKING
1561 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1562 sbitmap_vector_zero (forward_dependency_cache, luid);
1563 #endif
1567 /* Free the caches allocated in init_dependency_caches. */
1569 void
1570 free_dependency_caches ()
1572 if (true_dependency_cache)
1574 sbitmap_vector_free (true_dependency_cache);
1575 true_dependency_cache = NULL;
1576 sbitmap_vector_free (anti_dependency_cache);
1577 anti_dependency_cache = NULL;
1578 sbitmap_vector_free (output_dependency_cache);
1579 output_dependency_cache = NULL;
1580 #ifdef ENABLE_CHECKING
1581 sbitmap_vector_free (forward_dependency_cache);
1582 forward_dependency_cache = NULL;
1583 #endif
1587 /* Initialize some global variables needed by the dependency analysis
1588 code. */
1590 void
1591 init_deps_global ()
1593 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1594 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1595 reg_pending_sets_all = 0;
1598 /* Free everything used by the dependency analysis code. */
1600 void
1601 finish_deps_global ()
1603 FREE_REG_SET (reg_pending_sets);
1604 FREE_REG_SET (reg_pending_clobbers);