1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "flags.h"
37 #include "insn-config.h"
38 #include "insn-attr.h"
39 #include "except.h"
40 #include "toplev.h"
41 #include "recog.h"
42 #include "sched-int.h"
43 #include "params.h"
44 #include "cselib.h"
46 extern char *reg_known_equiv_p;
47 extern rtx *reg_known_value;
49 static regset_head reg_pending_sets_head;
50 static regset_head reg_pending_clobbers_head;
51 static regset_head reg_pending_uses_head;
53 static regset reg_pending_sets;
54 static regset reg_pending_clobbers;
55 static regset reg_pending_uses;
56 static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  When the bitmaps for the true dependency cache are
   allocated, the other two caches are allocated as well.  */
71 static sbitmap *true_dependency_cache;
72 static sbitmap *anti_dependency_cache;
73 static sbitmap *output_dependency_cache;
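/* For illustration: a dependence of an insn on ELEM is cached by setting
   bit INSN_LUID (elem) in the bitmap indexed by INSN_LUID (insn) of the
   cache matching the dependence kind, e.g.

       SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));

   add_dependence below tests these bits before walking LOG_LINKS.  */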
75 /* To speed up checking consistency of formed forward insn
76 dependencies we use the following cache. Another possible solution
77 could be switching off checking duplication of insns in forward
78 dependencies. */
79 #ifdef ENABLE_CHECKING
80 static sbitmap *forward_dependency_cache;
81 #endif
83 static int deps_may_trap_p PARAMS ((rtx));
84 static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
85 static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
86 static void remove_dependence PARAMS ((rtx, rtx));
87 static void set_sched_group_p PARAMS ((rtx));
89 static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
90 static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
91 static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
92 static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
93 static rtx group_leader PARAMS ((rtx));
95 static rtx get_condition PARAMS ((rtx));
96 static int conditions_mutex_p PARAMS ((rtx, rtx));
98 /* Return nonzero if a load of the memory reference MEM can cause a trap. */
100 static int
101 deps_may_trap_p (mem)
102 rtx mem;
104 rtx addr = XEXP (mem, 0);
106 if (REG_P (addr)
107 && REGNO (addr) >= FIRST_PSEUDO_REGISTER
108 && reg_known_value[REGNO (addr)])
109 addr = reg_known_value[REGNO (addr)];
110 return rtx_addr_can_trap_p (addr);
113 /* Return the INSN_LIST containing INSN in LIST, or NULL
114 if LIST does not contain INSN. */
rtx
find_insn_list (insn, list)
118 rtx insn;
119 rtx list;
121 while (list)
123 if (XEXP (list, 0) == insn)
124 return list;
125 list = XEXP (list, 1);
127 return 0;
130 /* Find the condition under which INSN is executed. */
132 static rtx
133 get_condition (insn)
134 rtx insn;
136 rtx pat = PATTERN (insn);
137 rtx cond;
139 if (pat == 0)
140 return 0;
141 if (GET_CODE (pat) == COND_EXEC)
142 return COND_EXEC_TEST (pat);
143 if (GET_CODE (insn) != JUMP_INSN)
144 return 0;
145 if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
146 return 0;
147 if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
148 return 0;
149 pat = SET_DEST (pat);
150 cond = XEXP (pat, 0);
151 if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
152 && XEXP (cond, 2) == pc_rtx)
153 return cond;
154 else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
155 && XEXP (cond, 1) == pc_rtx)
156 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
157 XEXP (cond, 0), XEXP (cond, 1));
158 else
159 return 0;
162 /* Return nonzero if conditions COND1 and COND2 can never be both true. */
164 static int
165 conditions_mutex_p (cond1, cond2)
166 rtx cond1, cond2;
168 if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
169 && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
170 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
171 && XEXP (cond1, 0) == XEXP (cond2, 0)
172 && XEXP (cond1, 1) == XEXP (cond2, 1))
173 return 1;
174 return 0;
177 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
178 LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
179 of dependence that this link represents. */
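/* As used throughout this file, a DEP_TYPE of 0 stands for a true (flow)
   dependence, while REG_DEP_ANTI and REG_DEP_OUTPUT stand for anti and
   output dependences respectively.  */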
181 void
182 add_dependence (insn, elem, dep_type)
183 rtx insn;
184 rtx elem;
185 enum reg_note dep_type;
187 rtx link, next;
188 int present_p;
189 rtx cond1, cond2;
191 /* Don't depend an insn on itself. */
192 if (insn == elem)
193 return;
195 /* We can get a dependency on deleted insns due to optimizations in
196 the register allocation and reloading or due to splitting. Any
197 such dependency is useless and can be ignored. */
198 if (GET_CODE (elem) == NOTE)
199 return;
201 /* flow.c doesn't handle conditional lifetimes entirely correctly;
202 calls mess up the conditional lifetimes. */
203 /* ??? add_dependence is the wrong place to be eliding dependencies,
204 as that forgets that the condition expressions themselves may
205 be dependent. */
206 if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
208 cond1 = get_condition (insn);
209 cond2 = get_condition (elem);
210 if (cond1 && cond2
211 && conditions_mutex_p (cond1, cond2)
212 /* Make sure first instruction doesn't affect condition of second
213 instruction if switched. */
214 && !modified_in_p (cond1, elem)
215 /* Make sure second instruction doesn't affect condition of first
216 instruction if switched. */
217 && !modified_in_p (cond2, insn))
218 return;
221 /* If elem is part of a sequence that must be scheduled together, then
222 make the dependence point to the last insn of the sequence.
223 When HAVE_cc0, it is possible for NOTEs to exist between users and
224 setters of the condition codes, so we must skip past notes here.
225 Otherwise, NOTEs are impossible here. */
226 next = next_nonnote_insn (elem);
227 if (next && INSN_P (next) && SCHED_GROUP_P (next))
      /* Notes can in fact intervene here, which is why the loop below
	 uses next_nonnote_insn.  We must also reject CODE_LABELs, so
	 that we don't get confused by one that has LABEL_PRESERVE_P
	 set, which is represented by the same bit in the rtl as
	 SCHED_GROUP_P.  A CODE_LABEL can never be SCHED_GROUP_P.  */
237 rtx nnext;
238 while ((nnext = next_nonnote_insn (next)) != NULL
239 && INSN_P (nnext)
240 && SCHED_GROUP_P (nnext))
241 next = nnext;
243 /* Again, don't depend an insn on itself. */
244 if (insn == next)
245 return;
247 /* Make the dependence to NEXT, the last insn of the group,
248 instead of the original ELEM. */
249 elem = next;
253 present_p = 1;
254 #ifdef INSN_SCHEDULING
255 /* ??? No good way to tell from here whether we're doing interblock
256 scheduling. Possibly add another callback. */
257 #if 0
258 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
259 No need for interblock dependences with calls, since
260 calls are not moved between blocks. Note: the edge where
261 elem is a CALL is still required. */
262 if (GET_CODE (insn) == CALL_INSN
263 && (INSN_BB (elem) != INSN_BB (insn)))
264 return;
265 #endif
267 /* If we already have a dependency for ELEM, then we do not need to
268 do anything. Avoiding the list walk below can cut compile times
269 dramatically for some code. */
270 if (true_dependency_cache != NULL)
272 enum reg_note present_dep_type = 0;
274 if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
275 abort ();
276 if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	/* Do nothing (present_dep_type is already 0).  */
	;
279 else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
280 INSN_LUID (elem)))
281 present_dep_type = REG_DEP_ANTI;
282 else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
283 INSN_LUID (elem)))
284 present_dep_type = REG_DEP_OUTPUT;
285 else
286 present_p = 0;
287 if (present_p && (int) dep_type >= (int) present_dep_type)
288 return;
290 #endif
292 /* Check that we don't already have this dependence. */
293 if (present_p)
294 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
295 if (XEXP (link, 0) == elem)
297 #ifdef INSN_SCHEDULING
298 /* Clear corresponding cache entry because type of the link
299 may be changed. */
300 if (true_dependency_cache != NULL)
302 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
303 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
304 INSN_LUID (elem));
305 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
306 && output_dependency_cache)
307 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
308 INSN_LUID (elem));
309 else
310 abort ();
312 #endif
314 /* If this is a more restrictive type of dependence than the existing
315 one, then change the existing dependence to this type. */
316 if ((int) dep_type < (int) REG_NOTE_KIND (link))
317 PUT_REG_NOTE_KIND (link, dep_type);
319 #ifdef INSN_SCHEDULING
320 /* If we are adding a dependency to INSN's LOG_LINKs, then
321 note that in the bitmap caches of dependency information. */
322 if (true_dependency_cache != NULL)
324 if ((int) REG_NOTE_KIND (link) == 0)
325 SET_BIT (true_dependency_cache[INSN_LUID (insn)],
326 INSN_LUID (elem));
327 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
328 SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
329 INSN_LUID (elem));
330 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
331 SET_BIT (output_dependency_cache[INSN_LUID (insn)],
332 INSN_LUID (elem));
334 #endif
335 return;
337 /* Might want to check one level of transitivity to save conses. */
339 link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
340 LOG_LINKS (insn) = link;
342 /* Insn dependency, not data dependency. */
343 PUT_REG_NOTE_KIND (link, dep_type);
345 #ifdef INSN_SCHEDULING
346 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
347 in the bitmap caches of dependency information. */
348 if (true_dependency_cache != NULL)
350 if ((int) dep_type == 0)
351 SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
352 else if (dep_type == REG_DEP_ANTI)
353 SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
354 else if (dep_type == REG_DEP_OUTPUT)
355 SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
357 #endif
360 /* A convenience wrapper to operate on an entire list. */
362 static void
363 add_dependence_list (insn, list, dep_type)
364 rtx insn, list;
365 enum reg_note dep_type;
367 for (; list; list = XEXP (list, 1))
368 add_dependence (insn, XEXP (list, 0), dep_type);
371 /* Similar, but free *LISTP at the same time. */
373 static void
374 add_dependence_list_and_free (insn, listp, dep_type)
375 rtx insn;
376 rtx *listp;
377 enum reg_note dep_type;
379 rtx list, next;
380 for (list = *listp, *listp = NULL; list ; list = next)
382 next = XEXP (list, 1);
383 add_dependence (insn, XEXP (list, 0), dep_type);
384 free_INSN_LIST_node (list);
388 /* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
389 of INSN. Abort if not found. */
391 static void
392 remove_dependence (insn, elem)
393 rtx insn;
394 rtx elem;
396 rtx prev, link, next;
397 int found = 0;
399 for (prev = 0, link = LOG_LINKS (insn); link; link = next)
401 next = XEXP (link, 1);
402 if (XEXP (link, 0) == elem)
404 if (prev)
405 XEXP (prev, 1) = next;
406 else
407 LOG_LINKS (insn) = next;
409 #ifdef INSN_SCHEDULING
410 /* If we are removing a dependency from the LOG_LINKS list,
411 make sure to remove it from the cache too. */
412 if (true_dependency_cache != NULL)
414 if (REG_NOTE_KIND (link) == 0)
415 RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
416 INSN_LUID (elem));
417 else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
418 RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
419 INSN_LUID (elem));
420 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
421 RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
422 INSN_LUID (elem));
424 #endif
426 free_INSN_LIST_node (link);
428 found = 1;
430 else
431 prev = link;
434 if (!found)
435 abort ();
436 return;
/* Return the insn that represents a SCHED_GROUP, namely the last insn
   in the group.  */
442 static rtx
443 group_leader (insn)
444 rtx insn;
446 rtx prev;
  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));

  return prev;
458 /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
459 goes along with that. */
461 static void
462 set_sched_group_p (insn)
463 rtx insn;
465 rtx link, prev;
467 SCHED_GROUP_P (insn) = 1;
469 /* There may be a note before this insn now, but all notes will
470 be removed before we actually try to schedule the insns, so
471 it won't cause a problem later. We must avoid it here
472 though. */
473 prev = prev_nonnote_insn (insn);
/* Make a copy of all dependencies on the immediately previous
   insn, and add them to this insn.  This is so that all of the
   dependencies will apply to the group.  Remove an explicit
   dependence on this insn as SCHED_GROUP_P now represents it.  */
480 if (find_insn_list (prev, LOG_LINKS (insn)))
481 remove_dependence (insn, prev);
483 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
484 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
487 /* Process an insn's memory dependencies. There are four kinds of
488 dependencies:
490 (0) read dependence: read follows read
491 (1) true dependence: read follows write
492 (2) anti dependence: write follows read
493 (3) output dependence: write follows write
495 We are careful to build only dependencies which actually exist, and
496 use transitivity to avoid building too many links. */
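/* As a small worked example (with hypothetical insns 1-3), given

       1: store to [x]
       2: load from [x]
       3: store to [x]

   insn 2 gains a true dependence on insn 1 (read follows write), insn 3
   gains an anti dependence on insn 2 (write follows read), and insn 3
   gains an output dependence on insn 1 (write follows write).  */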
498 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
499 The MEM is a memory reference contained within INSN, which we are saving
500 so that we can do memory aliasing on it. */
502 void
503 add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
504 struct deps *deps;
505 rtx *insn_list, *mem_list, insn, mem;
507 rtx link;
509 link = alloc_INSN_LIST (insn, *insn_list);
510 *insn_list = link;
512 if (current_sched_info->use_cselib)
514 mem = shallow_copy_rtx (mem);
515 XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
517 link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
518 *mem_list = link;
520 deps->pending_lists_length++;
523 /* Make a dependency between every memory reference on the pending lists
524 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
525 dependencies for a read operation, similarly with FOR_WRITE. */
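/* For instance, the call flush_pending_lists (deps, insn, true, false)
   made for TRAP_IF below adds (anti) dependences of INSN on every pending
   write and on every pending flush point, while leaving the pending read
   list untouched.  */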
527 static void
528 flush_pending_lists (deps, insn, for_read, for_write)
529 struct deps *deps;
530 rtx insn;
531 int for_read, for_write;
533 if (for_write)
535 add_dependence_list_and_free (insn, &deps->pending_read_insns,
536 REG_DEP_ANTI);
537 free_EXPR_LIST_list (&deps->pending_read_mems);
540 add_dependence_list_and_free (insn, &deps->pending_write_insns,
541 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
542 free_EXPR_LIST_list (&deps->pending_write_mems);
543 deps->pending_lists_length = 0;
545 add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
546 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
547 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
548 deps->pending_flush_length = 1;
551 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
552 rtx, X, creating all dependencies generated by the write to the
553 destination of X, and reads of everything mentioned. */
555 static void
556 sched_analyze_1 (deps, x, insn)
557 struct deps *deps;
558 rtx x;
559 rtx insn;
561 int regno;
562 rtx dest = XEXP (x, 0);
563 enum rtx_code code = GET_CODE (x);
565 if (dest == 0)
566 return;
568 if (GET_CODE (dest) == PARALLEL)
570 int i;
572 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
573 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
574 sched_analyze_1 (deps,
575 gen_rtx_CLOBBER (VOIDmode,
576 XEXP (XVECEXP (dest, 0, i), 0)),
577 insn);
579 if (GET_CODE (x) == SET)
580 sched_analyze_2 (deps, SET_SRC (x), insn);
581 return;
584 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
585 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
587 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
589 /* The second and third arguments are values read by this insn. */
590 sched_analyze_2 (deps, XEXP (dest, 1), insn);
591 sched_analyze_2 (deps, XEXP (dest, 2), insn);
593 dest = XEXP (dest, 0);
596 if (GET_CODE (dest) == REG)
598 regno = REGNO (dest);
600 /* A hard reg in a wide mode may really be multiple registers.
601 If so, mark all of them just like the first. */
602 if (regno < FIRST_PSEUDO_REGISTER)
604 int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
605 if (code == SET)
607 while (--i >= 0)
608 SET_REGNO_REG_SET (reg_pending_sets, regno + i);
610 else
612 while (--i >= 0)
613 SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
616 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
617 it does not reload. Ignore these as they have served their
618 purpose already. */
619 else if (regno >= deps->max_reg)
621 if (GET_CODE (PATTERN (insn)) != USE
622 && GET_CODE (PATTERN (insn)) != CLOBBER)
623 abort ();
625 else
627 if (code == SET)
628 SET_REGNO_REG_SET (reg_pending_sets, regno);
629 else
630 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
632 /* Pseudos that are REG_EQUIV to something may be replaced
633 by that during reloading. We need only add dependencies for
634 the address in the REG_EQUIV note. */
635 if (!reload_completed
636 && reg_known_equiv_p[regno]
637 && GET_CODE (reg_known_value[regno]) == MEM)
638 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
640 /* Don't let it cross a call after scheduling if it doesn't
641 already cross one. */
642 if (REG_N_CALLS_CROSSED (regno) == 0)
643 add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
646 else if (GET_CODE (dest) == MEM)
648 /* Writing memory. */
649 rtx t = dest;
651 if (current_sched_info->use_cselib)
653 t = shallow_copy_rtx (dest);
654 cselib_lookup (XEXP (t, 0), Pmode, 1);
655 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
658 if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
660 /* Flush all pending reads and writes to prevent the pending lists
661 from getting any larger. Insn scheduling runs too slowly when
662 these lists get long. When compiling GCC with itself,
663 this flush occurs 8 times for sparc, and 10 times for m88k using
664 the default value of 32. */
665 flush_pending_lists (deps, insn, false, true);
667 else
669 rtx pending, pending_mem;
671 pending = deps->pending_read_insns;
672 pending_mem = deps->pending_read_mems;
673 while (pending)
675 if (anti_dependence (XEXP (pending_mem, 0), t))
676 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
678 pending = XEXP (pending, 1);
679 pending_mem = XEXP (pending_mem, 1);
682 pending = deps->pending_write_insns;
683 pending_mem = deps->pending_write_mems;
684 while (pending)
686 if (output_dependence (XEXP (pending_mem, 0), t))
687 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
689 pending = XEXP (pending, 1);
690 pending_mem = XEXP (pending_mem, 1);
693 add_dependence_list (insn, deps->last_pending_memory_flush,
694 REG_DEP_ANTI);
696 add_insn_mem_dependence (deps, &deps->pending_write_insns,
697 &deps->pending_write_mems, insn, dest);
699 sched_analyze_2 (deps, XEXP (dest, 0), insn);
702 /* Analyze reads. */
703 if (GET_CODE (x) == SET)
704 sched_analyze_2 (deps, SET_SRC (x), insn);
707 /* Analyze the uses of memory and registers in rtx X in INSN. */
709 static void
710 sched_analyze_2 (deps, x, insn)
711 struct deps *deps;
712 rtx x;
713 rtx insn;
715 int i;
716 int j;
717 enum rtx_code code;
718 const char *fmt;
720 if (x == 0)
721 return;
723 code = GET_CODE (x);
725 switch (code)
727 case CONST_INT:
728 case CONST_DOUBLE:
729 case CONST_VECTOR:
730 case SYMBOL_REF:
731 case CONST:
732 case LABEL_REF:
733 /* Ignore constants. Note that we must handle CONST_DOUBLE here
734 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
735 this does not mean that this insn is using cc0. */
736 return;
738 #ifdef HAVE_cc0
739 case CC0:
740 /* User of CC0 depends on immediately preceding insn. */
741 set_sched_group_p (insn);
742 return;
743 #endif
745 case REG:
747 int regno = REGNO (x);
748 if (regno < FIRST_PSEUDO_REGISTER)
750 int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
751 while (--i >= 0)
752 SET_REGNO_REG_SET (reg_pending_uses, regno + i);
754 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
755 it does not reload. Ignore these as they have served their
756 purpose already. */
757 else if (regno >= deps->max_reg)
759 if (GET_CODE (PATTERN (insn)) != USE
760 && GET_CODE (PATTERN (insn)) != CLOBBER)
761 abort ();
763 else
765 SET_REGNO_REG_SET (reg_pending_uses, regno);
767 /* Pseudos that are REG_EQUIV to something may be replaced
768 by that during reloading. We need only add dependencies for
769 the address in the REG_EQUIV note. */
770 if (!reload_completed
771 && reg_known_equiv_p[regno]
772 && GET_CODE (reg_known_value[regno]) == MEM)
773 sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
775 /* If the register does not already cross any calls, then add this
776 insn to the sched_before_next_call list so that it will still
777 not cross calls after scheduling. */
778 if (REG_N_CALLS_CROSSED (regno) == 0)
779 deps->sched_before_next_call
780 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
782 return;
785 case MEM:
787 /* Reading memory. */
788 rtx u;
789 rtx pending, pending_mem;
790 rtx t = x;
792 if (current_sched_info->use_cselib)
794 t = shallow_copy_rtx (t);
795 cselib_lookup (XEXP (t, 0), Pmode, 1);
796 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
798 pending = deps->pending_read_insns;
799 pending_mem = deps->pending_read_mems;
800 while (pending)
802 if (read_dependence (XEXP (pending_mem, 0), t))
803 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
805 pending = XEXP (pending, 1);
806 pending_mem = XEXP (pending_mem, 1);
809 pending = deps->pending_write_insns;
810 pending_mem = deps->pending_write_mems;
811 while (pending)
813 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
814 t, rtx_varies_p))
815 add_dependence (insn, XEXP (pending, 0), 0);
817 pending = XEXP (pending, 1);
818 pending_mem = XEXP (pending_mem, 1);
821 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
822 if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
823 || deps_may_trap_p (x))
824 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
826 /* Always add these dependencies to pending_reads, since
827 this insn may be followed by a write. */
828 add_insn_mem_dependence (deps, &deps->pending_read_insns,
829 &deps->pending_read_mems, insn, x);
831 /* Take advantage of tail recursion here. */
832 sched_analyze_2 (deps, XEXP (x, 0), insn);
833 return;
836 /* Force pending stores to memory in case a trap handler needs them. */
837 case TRAP_IF:
838 flush_pending_lists (deps, insn, true, false);
839 break;
841 case ASM_OPERANDS:
842 case ASM_INPUT:
843 case UNSPEC_VOLATILE:
845 /* Traditional and volatile asm instructions must be considered to use
846 and clobber all hard registers, all pseudo-registers and all of
847 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
849 Consider for instance a volatile asm that changes the fpu rounding
850 mode. An insn should not be moved across this even if it only uses
851 pseudo-regs because it might give an incorrectly rounded result. */
852 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
853 reg_pending_barrier = true;
      /* For all ASM_OPERANDS, we must traverse the vector of input
	 operands.  We cannot just fall through here since then we would
	 be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
	 not indicate a traditional asm as it would in its normal usage.  */
860 if (code == ASM_OPERANDS)
862 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
863 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
864 return;
866 break;
869 case PRE_DEC:
870 case POST_DEC:
871 case PRE_INC:
872 case POST_INC:
873 /* These both read and modify the result. We must handle them as writes
874 to get proper dependencies for following instructions. We must handle
875 them as reads to get proper dependencies from this to previous
876 instructions. Thus we need to pass them to both sched_analyze_1
877 and sched_analyze_2. We must call sched_analyze_2 first in order
878 to get the proper antecedent for the read. */
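      /* For example, an address such as (post_inc (reg X)), where X is
	 purely illustrative, uses the old value of X (covered by the
	 sched_analyze_2 call below) and also stores a new value into X
	 (covered by sched_analyze_1).  */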
879 sched_analyze_2 (deps, XEXP (x, 0), insn);
880 sched_analyze_1 (deps, x, insn);
881 return;
883 case POST_MODIFY:
884 case PRE_MODIFY:
885 /* op0 = op0 + op1 */
886 sched_analyze_2 (deps, XEXP (x, 0), insn);
887 sched_analyze_2 (deps, XEXP (x, 1), insn);
888 sched_analyze_1 (deps, x, insn);
889 return;
891 default:
892 break;
895 /* Other cases: walk the insn. */
896 fmt = GET_RTX_FORMAT (code);
897 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
899 if (fmt[i] == 'e')
900 sched_analyze_2 (deps, XEXP (x, i), insn);
901 else if (fmt[i] == 'E')
902 for (j = 0; j < XVECLEN (x, i); j++)
903 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
907 /* Analyze an INSN with pattern X to find all dependencies. */
909 static void
910 sched_analyze_insn (deps, x, insn, loop_notes)
911 struct deps *deps;
912 rtx x, insn;
913 rtx loop_notes;
915 RTX_CODE code = GET_CODE (x);
916 rtx link;
917 int i;
919 if (code == COND_EXEC)
921 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
923 /* ??? Should be recording conditions so we reduce the number of
924 false dependencies. */
925 x = COND_EXEC_CODE (x);
926 code = GET_CODE (x);
928 if (code == SET || code == CLOBBER)
930 sched_analyze_1 (deps, x, insn);
932 /* Bare clobber insns are used for letting life analysis, reg-stack
933 and others know that a value is dead. Depend on the last call
934 instruction so that reg-stack won't get confused. */
935 if (code == CLOBBER)
936 add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
938 else if (code == PARALLEL)
940 int i;
941 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
943 rtx sub = XVECEXP (x, 0, i);
944 code = GET_CODE (sub);
946 if (code == COND_EXEC)
948 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
949 sub = COND_EXEC_CODE (sub);
950 code = GET_CODE (sub);
952 if (code == SET || code == CLOBBER)
953 sched_analyze_1 (deps, sub, insn);
954 else
955 sched_analyze_2 (deps, sub, insn);
958 else
959 sched_analyze_2 (deps, x, insn);
961 /* Mark registers CLOBBERED or used by called function. */
962 if (GET_CODE (insn) == CALL_INSN)
964 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
966 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
967 sched_analyze_1 (deps, XEXP (link, 0), insn);
968 else
969 sched_analyze_2 (deps, XEXP (link, 0), insn);
971 if (find_reg_note (insn, REG_SETJMP, NULL))
972 reg_pending_barrier = true;
975 if (GET_CODE (insn) == JUMP_INSN)
977 rtx next;
978 next = next_nonnote_insn (insn);
979 if (next && GET_CODE (next) == BARRIER)
980 reg_pending_barrier = true;
981 else
983 rtx pending, pending_mem;
984 regset_head tmp;
985 INIT_REG_SET (&tmp);
987 (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
988 /* Make latency of jump equal to 0 by using anti-dependence. */
989 EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
991 struct deps_reg *reg_last = &deps->reg_last[i];
992 add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
993 add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
994 reg_last->uses_length++;
995 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
997 CLEAR_REG_SET (&tmp);
/* All memory writes and volatile reads must happen before the
   jump.  Non-volatile reads must happen before the jump iff their
   result is needed by one of the registers in the use mask computed
   above.  */
1003 pending = deps->pending_write_insns;
1004 pending_mem = deps->pending_write_mems;
1005 while (pending)
1007 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1008 pending = XEXP (pending, 1);
1009 pending_mem = XEXP (pending_mem, 1);
1012 pending = deps->pending_read_insns;
1013 pending_mem = deps->pending_read_mems;
1014 while (pending)
1016 if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1017 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1018 pending = XEXP (pending, 1);
1019 pending_mem = XEXP (pending_mem, 1);
1022 add_dependence_list (insn, deps->last_pending_memory_flush,
1023 REG_DEP_ANTI);
1027 /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1028 block, then we must be sure that no instructions are scheduled across it.
1029 Otherwise, the reg_n_refs info (which depends on loop_depth) would
1030 become incorrect. */
1031 if (loop_notes)
1033 rtx link;
1035 /* Update loop_notes with any notes from this insn. Also determine
1036 if any of the notes on the list correspond to instruction scheduling
1037 barriers (loop, eh & setjmp notes, but not range notes). */
1038 link = loop_notes;
1039 while (XEXP (link, 1))
1041 if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1042 || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1043 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1044 || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
1045 reg_pending_barrier = true;
1047 link = XEXP (link, 1);
1049 XEXP (link, 1) = REG_NOTES (insn);
1050 REG_NOTES (insn) = loop_notes;
1053 /* If this instruction can throw an exception, then moving it changes
1054 where block boundaries fall. This is mighty confusing elsewhere.
1055 Therefore, prevent such an instruction from being moved. */
1056 if (can_throw_internal (insn))
1057 reg_pending_barrier = true;
1059 /* Add dependencies if a scheduling barrier was found. */
1060 if (reg_pending_barrier)
      /* In the case of a barrier, most of the added dependencies are not
	 real ones, so we use anti-dependences here.  */
1064 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1066 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1068 struct deps_reg *reg_last = &deps->reg_last[i];
1069 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1070 add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
1071 add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
1074 else
1076 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1078 struct deps_reg *reg_last = &deps->reg_last[i];
1079 add_dependence_list_and_free (insn, &reg_last->uses,
1080 REG_DEP_ANTI);
1081 add_dependence_list_and_free (insn, &reg_last->sets,
1082 REG_DEP_ANTI);
1083 add_dependence_list_and_free (insn, &reg_last->clobbers,
1084 REG_DEP_ANTI);
1085 reg_last->uses_length = 0;
1086 reg_last->clobbers_length = 0;
1090 for (i = 0; i < deps->max_reg; i++)
1092 struct deps_reg *reg_last = &deps->reg_last[i];
1093 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1094 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1097 flush_pending_lists (deps, insn, true, true);
1098 reg_pending_barrier = false;
1100 else
1102 /* If the current insn is conditional, we can't free any
1103 of the lists. */
1104 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1106 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1108 struct deps_reg *reg_last = &deps->reg_last[i];
1109 add_dependence_list (insn, reg_last->sets, 0);
1110 add_dependence_list (insn, reg_last->clobbers, 0);
1111 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1112 reg_last->uses_length++;
1114 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1116 struct deps_reg *reg_last = &deps->reg_last[i];
1117 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1118 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1119 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1120 reg_last->clobbers_length++;
1122 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1124 struct deps_reg *reg_last = &deps->reg_last[i];
1125 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1126 add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
1127 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1128 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1131 else
1133 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1135 struct deps_reg *reg_last = &deps->reg_last[i];
1136 add_dependence_list (insn, reg_last->sets, 0);
1137 add_dependence_list (insn, reg_last->clobbers, 0);
1138 reg_last->uses_length++;
1139 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1141 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1143 struct deps_reg *reg_last = &deps->reg_last[i];
1144 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1145 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1147 add_dependence_list_and_free (insn, &reg_last->sets,
1148 REG_DEP_OUTPUT);
1149 add_dependence_list_and_free (insn, &reg_last->uses,
1150 REG_DEP_ANTI);
1151 add_dependence_list_and_free (insn, &reg_last->clobbers,
1152 REG_DEP_OUTPUT);
1153 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1154 reg_last->clobbers_length = 0;
1155 reg_last->uses_length = 0;
1157 else
1159 add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1160 add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1162 reg_last->clobbers_length++;
1163 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1165 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1167 struct deps_reg *reg_last = &deps->reg_last[i];
1168 add_dependence_list_and_free (insn, &reg_last->sets,
1169 REG_DEP_OUTPUT);
1170 add_dependence_list_and_free (insn, &reg_last->clobbers,
1171 REG_DEP_OUTPUT);
1172 add_dependence_list_and_free (insn, &reg_last->uses,
1173 REG_DEP_ANTI);
1174 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1175 reg_last->uses_length = 0;
1176 reg_last->clobbers_length = 0;
1180 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1181 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1182 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1184 CLEAR_REG_SET (reg_pending_uses);
1185 CLEAR_REG_SET (reg_pending_clobbers);
1186 CLEAR_REG_SET (reg_pending_sets);
  /* If we are currently in a libcall scheduling group, then mark the
     current insn as part of a scheduling group and note that it cannot
     be moved into a different basic block.  */
1192 if (deps->libcall_block_tail_insn)
1194 set_sched_group_p (insn);
1195 CANT_MOVE (insn) = 1;
1198 /* If a post-call group is still open, see if it should remain so.
1199 This insn must be a simple move of a hard reg to a pseudo or
1200 vice-versa.
1202 We must avoid moving these insns for correctness on
1203 SMALL_REGISTER_CLASS machines, and for special registers like
1204 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
1205 hard regs for all targets. */
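  /* For instance (illustrative, not a pattern taken from this file), the
     copy of a call's hard return-value register into a pseudo immediately
     after the CALL_INSN is kept in the post-call group, so the hard
     register's lifetime stays as short as flow expects.  */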
1207 if (deps->in_post_call_group_p)
1209 rtx tmp, set = single_set (insn);
1210 int src_regno, dest_regno;
1212 if (set == NULL)
1213 goto end_call_group;
1215 tmp = SET_DEST (set);
1216 if (GET_CODE (tmp) == SUBREG)
1217 tmp = SUBREG_REG (tmp);
1218 if (GET_CODE (tmp) == REG)
1219 dest_regno = REGNO (tmp);
1220 else
1221 goto end_call_group;
1223 tmp = SET_SRC (set);
1224 if (GET_CODE (tmp) == SUBREG)
1225 tmp = SUBREG_REG (tmp);
1226 if (GET_CODE (tmp) == REG)
1227 src_regno = REGNO (tmp);
1228 else
1229 goto end_call_group;
1231 if (src_regno < FIRST_PSEUDO_REGISTER
1232 || dest_regno < FIRST_PSEUDO_REGISTER)
1234 set_sched_group_p (insn);
1235 CANT_MOVE (insn) = 1;
1237 else
1239 end_call_group:
1240 deps->in_post_call_group_p = false;
1245 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1246 for every dependency. */
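/* The LOG_LINKS built here record backward dependencies: each insn points
   at the insns it depends on.  compute_forward_dependences, further down,
   later inverts them into the INSN_DEPEND lists used by the scheduler.  */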
1248 void
1249 sched_analyze (deps, head, tail)
1250 struct deps *deps;
1251 rtx head, tail;
1253 rtx insn;
1254 rtx loop_notes = 0;
1256 if (current_sched_info->use_cselib)
1257 cselib_init ();
1259 for (insn = head;; insn = NEXT_INSN (insn))
1261 rtx link, end_seq, r0, set;
1263 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1265 /* Clear out the stale LOG_LINKS from flow. */
1266 free_INSN_LIST_list (&LOG_LINKS (insn));
1268 /* Make each JUMP_INSN a scheduling barrier for memory
1269 references. */
1270 if (GET_CODE (insn) == JUMP_INSN)
1272 /* Keep the list a reasonable size. */
1273 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1274 flush_pending_lists (deps, insn, true, true);
1275 else
1276 deps->last_pending_memory_flush
1277 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1279 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1280 loop_notes = 0;
1282 else if (GET_CODE (insn) == CALL_INSN)
1284 int i;
1286 CANT_MOVE (insn) = 1;
1288 /* Clear out the stale LOG_LINKS from flow. */
1289 free_INSN_LIST_list (&LOG_LINKS (insn));
1291 if (find_reg_note (insn, REG_SETJMP, NULL))
1293 /* This is setjmp. Assume that all registers, not just
1294 hard registers, may be clobbered by this call. */
1295 reg_pending_barrier = true;
1297 else
1299 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1300 /* A call may read and modify global register variables. */
1301 if (global_regs[i])
1303 SET_REGNO_REG_SET (reg_pending_sets, i);
1304 SET_REGNO_REG_SET (reg_pending_uses, i);
1306 /* Other call-clobbered hard regs may be clobbered.
1307 Since we only have a choice between 'might be clobbered'
1308 and 'definitely not clobbered', we must include all
1309 partly call-clobbered registers here. */
1310 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1311 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1312 SET_REGNO_REG_SET (reg_pending_clobbers, i);
	      /* We don't know exactly which fixed registers might be used
		 by the function, but the stack pointer is certainly among
		 them, so be conservative.  */
1316 else if (fixed_regs[i])
1317 SET_REGNO_REG_SET (reg_pending_uses, i);
1318 /* The frame pointer is normally not used by the function
1319 itself, but by the debugger. */
1320 /* ??? MIPS o32 is an exception. It uses the frame pointer
1321 in the macro expansion of jal but does not represent this
1322 fact in the call_insn rtl. */
1323 else if (i == FRAME_POINTER_REGNUM
1324 || (i == HARD_FRAME_POINTER_REGNUM
1325 && (! reload_completed || frame_pointer_needed)))
1326 SET_REGNO_REG_SET (reg_pending_uses, i);
1329 /* For each insn which shouldn't cross a call, add a dependence
1330 between that insn and this call insn. */
1331 add_dependence_list_and_free (insn, &deps->sched_before_next_call,
1332 REG_DEP_ANTI);
1334 sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1335 loop_notes = 0;
	  /* In the absence of interprocedural alias analysis, we must
	     flush all pending reads and writes, and start new dependencies
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
1341 flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1343 /* Remember the last function call for limiting lifetimes. */
1344 free_INSN_LIST_list (&deps->last_function_call);
1345 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1347 /* Before reload, begin a post-call group, so as to keep the
1348 lifetimes of hard registers correct. */
1349 if (! reload_completed)
1350 deps->in_post_call_group_p = true;
1353 /* See comments on reemit_notes as to why we do this.
1354 ??? Actually, the reemit_notes just say what is done, not why. */
1356 if (GET_CODE (insn) == NOTE
1357 && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1358 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1359 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1360 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
1362 rtx rtx_region;
1364 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1365 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1366 rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1367 else
1368 rtx_region = GEN_INT (0);
1370 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1371 rtx_region,
1372 loop_notes);
1373 loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1374 GEN_INT (NOTE_LINE_NUMBER (insn)),
1375 loop_notes);
1376 CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1379 if (current_sched_info->use_cselib)
1380 cselib_process_insn (insn);
1382 /* Now that we have completed handling INSN, check and see if it is
1383 a CLOBBER beginning a libcall block. If it is, record the
1384 end of the libcall sequence.
1386 We want to schedule libcall blocks as a unit before reload. While
1387 this restricts scheduling, it preserves the meaning of a libcall
1388 block.
1390 As a side effect, we may get better code due to decreased register
1391 pressure as well as less chance of a foreign insn appearing in
1392 a libcall block. */
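      /* Concretely, sketched from the tests below, the block looked for is

	     (clobber (reg R))        ;; carries a REG_LIBCALL note that
	     ...                      ;; points at ...
	     (set (reg R) (reg R))    ;; ... this nop copy, which carries
				      ;; REG_RETVAL and REG_EQUAL notes,

	 where R names whatever register the libcall result lives in.  */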
1393 if (!reload_completed
1394 /* Note we may have nested libcall sequences. We only care about
1395 the outermost libcall sequence. */
1396 && deps->libcall_block_tail_insn == 0
1397 /* The sequence must start with a clobber of a register. */
1398 && GET_CODE (insn) == INSN
1399 && GET_CODE (PATTERN (insn)) == CLOBBER
1400 && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
1401 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG
1402 /* The CLOBBER must also have a REG_LIBCALL note attached. */
1403 && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1404 && (end_seq = XEXP (link, 0)) != 0
1405 /* The insn referenced by the REG_LIBCALL note must be a
1406 simple nop copy with the same destination as the register
1407 mentioned in the clobber. */
1408 && (set = single_set (end_seq)) != 0
1409 && SET_DEST (set) == r0 && SET_SRC (set) == r0
1410 /* And finally the insn referenced by the REG_LIBCALL must
1411 also contain a REG_EQUAL note and a REG_RETVAL note. */
1412 && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1413 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1414 deps->libcall_block_tail_insn = XEXP (link, 0);
1416 /* If we have reached the end of a libcall block, then close the
1417 block. */
1418 if (deps->libcall_block_tail_insn == insn)
1419 deps->libcall_block_tail_insn = 0;
1421 if (insn == tail)
1423 if (current_sched_info->use_cselib)
1424 cselib_finish ();
1425 return;
1428 abort ();
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */
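/* In other words, a backward LOG_LINKS entry recording that INSN depends
   on X becomes a forward INSN_DEPEND entry on X pointing back at INSN,
   and INSN_DEP_COUNT of INSN is incremented once per such link.  */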
1435 void
1436 compute_forward_dependences (head, tail)
1437 rtx head, tail;
1439 rtx insn, link;
1440 rtx next_tail;
1441 enum reg_note dep_type;
1443 next_tail = NEXT_INSN (tail);
1444 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1446 if (! INSN_P (insn))
1447 continue;
1449 insn = group_leader (insn);
1451 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1453 rtx x = group_leader (XEXP (link, 0));
1454 rtx new_link;
1456 if (x != XEXP (link, 0))
1457 continue;
1459 #ifdef ENABLE_CHECKING
1460 /* If add_dependence is working properly there should never
1461 be notes, deleted insns or duplicates in the backward
1462 links. Thus we need not check for them here.
1464 However, if we have enabled checking we might as well go
1465 ahead and verify that add_dependence worked properly. */
1466 if (GET_CODE (x) == NOTE
1467 || INSN_DELETED_P (x)
1468 || (forward_dependency_cache != NULL
1469 && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1470 INSN_LUID (insn)))
1471 || (forward_dependency_cache == NULL
1472 && find_insn_list (insn, INSN_DEPEND (x))))
1473 abort ();
1474 if (forward_dependency_cache != NULL)
1475 SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1476 INSN_LUID (insn));
1477 #endif
1479 new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1481 dep_type = REG_NOTE_KIND (link);
1482 PUT_REG_NOTE_KIND (new_link, dep_type);
1484 INSN_DEPEND (x) = new_link;
1485 INSN_DEP_COUNT (insn) += 1;
/* Initialize variables for region data dependence analysis.
   DEPS is the structure to fill in.  */
1493 void
1494 init_deps (deps)
1495 struct deps *deps;
1497 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1499 deps->max_reg = max_reg;
1500 deps->reg_last = (struct deps_reg *)
1501 xcalloc (max_reg, sizeof (struct deps_reg));
1502 INIT_REG_SET (&deps->reg_last_in_use);
1504 deps->pending_read_insns = 0;
1505 deps->pending_read_mems = 0;
1506 deps->pending_write_insns = 0;
1507 deps->pending_write_mems = 0;
1508 deps->pending_lists_length = 0;
1509 deps->pending_flush_length = 0;
1510 deps->last_pending_memory_flush = 0;
1511 deps->last_function_call = 0;
1512 deps->sched_before_next_call = 0;
1513 deps->in_post_call_group_p = false;
1514 deps->libcall_block_tail_insn = 0;
1517 /* Free insn lists found in DEPS. */
1519 void
1520 free_deps (deps)
1521 struct deps *deps;
1523 int i;
1525 free_INSN_LIST_list (&deps->pending_read_insns);
1526 free_EXPR_LIST_list (&deps->pending_read_mems);
1527 free_INSN_LIST_list (&deps->pending_write_insns);
1528 free_EXPR_LIST_list (&deps->pending_write_mems);
1529 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1531 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1532 times. For a test case with 42000 regs and 8000 small basic blocks,
1533 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
1534 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1536 struct deps_reg *reg_last = &deps->reg_last[i];
1537 if (reg_last->uses)
1538 free_INSN_LIST_list (&reg_last->uses);
1539 if (reg_last->sets)
1540 free_INSN_LIST_list (&reg_last->sets);
1541 if (reg_last->clobbers)
1542 free_INSN_LIST_list (&reg_last->clobbers);
1544 CLEAR_REG_SET (&deps->reg_last_in_use);
1546 free (deps->reg_last);
1549 /* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */
1553 void
1554 init_dependency_caches (luid)
1555 int luid;
1557 /* ?!? We could save some memory by computing a per-region luid mapping
1558 which could reduce both the number of vectors in the cache and the size
1559 of each vector. Instead we just avoid the cache entirely unless the
1560 average number of instructions in a basic block is very high. See
1561 the comment before the declaration of true_dependency_cache for
1562 what we consider "very high". */
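  /* With the 5-insn average and the 100X factor described there, "very
     high" works out to more than 500 insns per basic block on average.  */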
1563 if (luid / n_basic_blocks > 100 * 5)
1565 true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1566 sbitmap_vector_zero (true_dependency_cache, luid);
1567 anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1568 sbitmap_vector_zero (anti_dependency_cache, luid);
1569 output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1570 sbitmap_vector_zero (output_dependency_cache, luid);
1571 #ifdef ENABLE_CHECKING
1572 forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1573 sbitmap_vector_zero (forward_dependency_cache, luid);
1574 #endif
1578 /* Free the caches allocated in init_dependency_caches. */
1580 void
1581 free_dependency_caches ()
1583 if (true_dependency_cache)
1585 sbitmap_vector_free (true_dependency_cache);
1586 true_dependency_cache = NULL;
1587 sbitmap_vector_free (anti_dependency_cache);
1588 anti_dependency_cache = NULL;
1589 sbitmap_vector_free (output_dependency_cache);
1590 output_dependency_cache = NULL;
1591 #ifdef ENABLE_CHECKING
1592 sbitmap_vector_free (forward_dependency_cache);
1593 forward_dependency_cache = NULL;
1594 #endif
1598 /* Initialize some global variables needed by the dependency analysis
1599 code. */
1601 void
1602 init_deps_global ()
1604 reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1605 reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1606 reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
1607 reg_pending_barrier = false;
1610 /* Free everything used by the dependency analysis code. */
1612 void
1613 finish_deps_global ()
1615 FREE_REG_SET (reg_pending_sets);
1616 FREE_REG_SET (reg_pending_clobbers);
1617 FREE_REG_SET (reg_pending_uses);