/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#ifndef GCC_SCHED_INT_H
#define GCC_SCHED_INT_H

/* For state_t.  */
#include "insn-attr.h"

/* For regset_head.  */
#include "basic-block.h"

/* For reg_note.  */
#include "rtl.h"

/* Pointer to data describing the current DFA state.  */
extern state_t curr_state;

/* Forward declaration.  */
struct ready_list;

/* Type to represent status of a dependence.  */
typedef int ds_t;

/* Type to represent weakness of speculative dependence.  */
typedef int dw_t;

extern enum reg_note ds_to_dk (ds_t);
extern ds_t dk_to_ds (enum reg_note);
/* Information about the dependency.  */
struct _dep
{
  /* Producer.  */
  rtx pro;

  /* Consumer.  */
  rtx con;

  /* Dependency kind (aka dependency major type).  This field is superseded
     by STATUS below.  Though, it is still in place because all the backends
     use it.  */
  enum reg_note kind;

  /* Dependency status.  This field holds all dependency types and additional
     information for speculative dependencies.  */
  ds_t status;
};
typedef struct _dep *dep_t;

#define DEP_PRO(D) ((D)->pro)
#define DEP_CON(D) ((D)->con)
#define DEP_KIND(D) ((D)->kind)
#define DEP_STATUS(D) ((D)->status)

/* Functions to work with dep.  */

extern void init_dep (dep_t, rtx, rtx, enum reg_note);
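
/* Illustrative sketch (not part of the interface): filling in a dependence
   by hand and reading it back through the accessors above.  Here PRODUCER
   and CONSUMER are assumed to be arbitrary insns, and the argument order of
   init_dep () is taken to be (dep, producer, consumer, kind).

     struct _dep dep;

     init_dep (&dep, producer, consumer, REG_DEP_TRUE);
     gcc_assert (DEP_PRO (&dep) == producer
                 && DEP_CON (&dep) == consumer
                 && DEP_KIND (&dep) == REG_DEP_TRUE);  */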
/* Definition of this struct resides below.  */
struct _dep_node;

/* A link in the dependency list.  This is essentially an equivalent of a
   single {INSN, DEPS}_LIST rtx.  */
struct _dep_link
{
  /* Dep node with all the data.  */
  struct _dep_node *node;

  /* Next link in the list.  For the last one it is NULL.  */
  struct _dep_link *next;

  /* Pointer to the next field of the previous link in the list.
     For the first link this points to the deps_list->first.

     With help of this field it is easy to remove and insert links to the
     list.  */
  struct _dep_link **prev_nextp;
};
typedef struct _dep_link *dep_link_t;

#define DEP_LINK_NODE(N) ((N)->node)
#define DEP_LINK_NEXT(N) ((N)->next)
#define DEP_LINK_PREV_NEXTP(N) ((N)->prev_nextp)

/* Macros to work with dep_link.  For most use cases only part of the
   dependency information is needed.  These macros conveniently provide
   that piece of information.  */

#define DEP_LINK_DEP(N) (DEP_NODE_DEP (DEP_LINK_NODE (N)))
#define DEP_LINK_PRO(N) (DEP_PRO (DEP_LINK_DEP (N)))
#define DEP_LINK_CON(N) (DEP_CON (DEP_LINK_DEP (N)))
#define DEP_LINK_KIND(N) (DEP_KIND (DEP_LINK_DEP (N)))
#define DEP_LINK_STATUS(N) (DEP_STATUS (DEP_LINK_DEP (N)))

void debug_dep_links (dep_link_t);
/* A list of dep_links.  Lists of this type are now used instead of rtx
   LOG_LINKS and similar lists.  */
struct _deps_list
{
  dep_link_t first;
};
typedef struct _deps_list *deps_list_t;

#define DEPS_LIST_FIRST(L) ((L)->first)

/* Macro to walk through deps_list.  */
#define FOR_EACH_DEP_LINK(LINK, LIST) \
  for ((LINK) = DEPS_LIST_FIRST (LIST); \
       (LINK) != NULL; \
       (LINK) = DEP_LINK_NEXT (LINK))

/* Functions to work with deps_list.  */

deps_list_t create_deps_list (bool);
void free_deps_list (deps_list_t);
void delete_deps_list (deps_list_t);
bool deps_list_empty_p (deps_list_t);
void debug_deps_list (deps_list_t);
void add_back_dep_to_deps_list (deps_list_t, dep_t);
dep_link_t find_link_by_pro_in_deps_list (deps_list_t, rtx);
dep_link_t find_link_by_con_in_deps_list (deps_list_t, rtx);
void copy_deps_list_change_con (deps_list_t, deps_list_t, rtx);

void move_dep_link (dep_link_t, deps_list_t);
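
/* Illustrative sketch: walking a deps_list with the macros above.  LIST is
   assumed to be any deps_list_t, e.g. one obtained from create_deps_list ().

     dep_link_t link;

     FOR_EACH_DEP_LINK (link, list)
       if (DEP_LINK_KIND (link) == REG_DEP_TRUE)
         fprintf (stderr, "true dep %d -> %d\n",
                  INSN_UID (DEP_LINK_PRO (link)),
                  INSN_UID (DEP_LINK_CON (link)));  */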
/* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
   additional dependents con0 and con2, and con1 is dependent on additional
   insns pro0 and pro2:

   .con0      pro0
   .    ^      |
   .    |      |
   .    |      |
   .    X      A
   .    |      |
   .    |      |
   .    |      V
   .pro1--Y-->con1
   .    |      ^
   .    |      |
   .    |      |
   .    Z      B
   .    |      |
   .    |      |
   .    V      |
   .con2      pro2

   This is represented using a "dep_node" for each dependence arc, which are
   connected as follows (diagram is centered around Y which is fully shown;
   other dep_nodes shown partially):

   . +------------+ +--------------+ +------------+
   . : dep_node X : | dep_node Y | : dep_node Z :
   . : : | | : :
   . : : | | : :
   . : forw : | forw | : forw :
   . : +--------+ : | +--------+ | : +--------+ :
   forw_deps : |dep_link| : | |dep_link| | : |dep_link| :
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   . ^ ^ : | ^ | : | | ^ | | : | | :
   . | | : | | | : | | | | | : | | :
   . | +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
   . | : | | | : | | | | | : | | | :
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | |prev| | : | | |prev| | | : | |prev| | :
   . | : | |next| | : | | |next| | | : | |next| | :
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | | :<-+ | | | |<-+ : | | :<-+
   . | : | +----+ | : | | | +----+ | | | : | +----+ | : |
   . | : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | | : | | | | : | | :
   . | : +--------+ : | +--------+ | : +--------+ :
   . | : : | | : :
   . | : SAME pro1 : | +--------+ | : SAME pro1 :
   . | : DIFF con0 : | |dep | | : DIFF con2 :
   . | : : | | | | : :
   . | | | +----+ | |
   .RTX<------------------------+--+-|pro1| | |
   .pro1 | | +----+ | |
   . | | | |
   . | | +----+ | |
   .RTX<------------------------+--+-|con1| | |
   .con1 | | +----+ | |
   . | | | | |
   . | | | +----+ | |
   . | | | |kind| | |
   . | | | +----+ | |
   . | : : | | |stat| | | : :
   . | : DIFF pro0 : | | +----+ | | : DIFF pro2 :
   . | : SAME con1 : | | | | : SAME con1 :
   . | : : | +--------+ | : :
   . | : : | | : :
   . | : back : | back | : back :
   . v : +--------+ : | +--------+ | : +--------+ :
   back_deps : |dep_link| : | |dep_link| | : |dep_link| :
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   . ^ : | ^ | : | | ^ | | : | | :
   . | : | | | : | | | | | : | | :
   . +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
   . : | | | : | | | | | : | | | :
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | |prev| | : | | |prev| | | : | |prev| | :
   . : | |next| | : | | |next| | | : | |next| | :
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | | :<-+ | | | |<-+ : | | :<-+
   . : | +----+ | : | | | +----+ | | | : | +----+ | : |
   . : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | | : | | | | : | | :
   . : +--------+ : | +--------+ | : +--------+ :
   . : : | | : :
   . : dep_node A : | dep_node Y | : dep_node B :
   . +------------+ +--------------+ +------------+
*/
struct _dep_node
{
  /* Backward link.  */
  struct _dep_link back;

  /* The dep.  */
  struct _dep dep;

  /* Forward link.  */
  struct _dep_link forw;
};
typedef struct _dep_node *dep_node_t;

#define DEP_NODE_BACK(N) (&(N)->back)
#define DEP_NODE_DEP(N) (&(N)->dep)
#define DEP_NODE_FORW(N) (&(N)->forw)
/* Describes the state of dependencies used during the sched_analyze phase.  */
struct deps
{
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
  rtx pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx pending_write_mems;

  /* Indicates the combined length of the two pending lists.  We must prevent
     these lists from ever growing too large since the number of dependencies
     produced is at least O(N*N), and execution time is at least O(4*N*N), as
     a function of the length of these pending lists.  */
  int pending_lists_length;

  /* Length of the pending memory flush list.  Large functions with no
     calls may build up extremely large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non-constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx last_function_call;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insns
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx sched_before_next_call;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  */
  enum { not_post_call, post_call, post_call_initial } in_post_call_group_p;

  /* Set to the tail insn of the outermost libcall block.

     When nonzero, we will mark each insn processed by sched_analyze_insn
     with SCHED_GROUP_P to ensure libcalls are scheduled as a unit.  */
  rtx libcall_block_tail_insn;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  */
  struct deps_reg
    {
      rtx uses;
      rtx sets;
      rtx clobbers;
      int uses_length;
      int clobbers_length;
    } *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Element N is set for each register that is conditionally set.  */
  regset_head reg_conditional_sets;
};
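
/* Illustrative sketch of the typical lifetime of a deps context (a sketch
   only; HEAD and TAIL are assumed to delimit the insns being analyzed, and
   the functions used are the ones declared near the end of this file):

     struct deps tmp_deps;

     init_deps (&tmp_deps);
     sched_analyze (&tmp_deps, head, tail);
     ...
     free_deps (&tmp_deps);

   init_deps_global () / finish_deps_global () are the pass-wide
   counterparts, while init_deps () / free_deps () bracket each analyzed
   region.  */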
/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (void);

  /* Called after taking an insn from the ready list.  Returns nonzero if
     this insn can be scheduled, zero if we should silently discard it.  */
  int (*can_schedule_ready_p) (rtx);

  /* Return nonzero if there are more insns that should be scheduled.  */
  int (*schedule_more_p) (void);

  /* Called after an insn has all its hard dependencies resolved.
     Adjusts the status of the instruction (which is passed through the
     second parameter) to indicate whether the instruction should be moved
     to the ready list or the queue, or whether it should be silently
     discarded (until the next resolved dependence).  */
  ds_t (*new_ready) (rtx, ds_t);

  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx, rtx);

  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (rtx, int);

  /* Return nonzero if an insn should be included in priority
     calculations.  */
  int (*contributes_to_priority) (rtx, rtx);

  /* Called when computing dependencies for a JUMP_INSN.  This function
     should store the set of registers that must be considered as set by
     the jump in the regset.  */
  void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx prev_head, next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx head, tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;

  /* Nonzero if we should use cselib for better alias analysis.  This
     must be 0 if the dependency information is used after sched_analyze
     has completed, e.g. if we're using it to initialize state for successor
     blocks in region scheduling.  */
  unsigned int use_cselib:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;

  /* Hooks to support speculative scheduling.  */

  /* Called to notify the frontend that an instruction is being added
     (second parameter == 0) or removed (second parameter == 1).  */
  void (*add_remove_insn) (rtx, int);

  /* Called to notify the frontend that an instruction is being scheduled.
     The first parameter is the instruction being scheduled, the second
     parameter is the last scheduled instruction.  */
  void (*begin_schedule_ready) (rtx, rtx);

  /* Called to notify the frontend that a new basic block is being added.
     The first parameter is the new basic block.
     The second parameter is the block after which the new basic block is
     being added, or EXIT_BLOCK_PTR if a recovery block is being added,
     or NULL if a standalone block is being added.  */
  void (*add_block) (basic_block, basic_block);

  /* If the second parameter is not NULL, return a nonnull value if the
     basic block should be advanced.
     If the second parameter is NULL, return the next basic block in the EBB.
     The first parameter is the current basic block in the EBB.  */
  basic_block (*advance_target_bb) (basic_block, rtx);

  /* Called after blocks were rearranged due to movement of a jump
     instruction.
     The first parameter is the index of the basic block in which the jump
     currently is.
     The second parameter is the index of the basic block in which the jump
     used to be.
     The third parameter is the index of the basic block that follows the
     second parameter.  */
  void (*fix_recovery_cfg) (int, int, int);

#ifdef ENABLE_CHECKING
  /* If the second parameter is zero, return nonzero if the block is a head
     of the region.
     If the second parameter is nonzero, return nonzero if the block is a
     leaf of the region.
     global_live_at_start should not change in region heads and
     global_live_at_end should not change in region leaves due to
     scheduling.  */
  int (*region_head_or_leaf_p) (basic_block, int);
#endif

  /* ??? FIXME: should use straight bitfields inside sched_info instead of
     this flag field.  */
  unsigned int flags;
};
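
/* Illustrative sketch (an assumption-laden example, not an existing
   frontend): a trivial rank hook that prefers the insn with the smaller
   logical uid, following the sign convention documented above.

     static int
     toy_rank (rtx insn1, rtx insn2)
     {
       return INSN_LUID (insn1) - INSN_LUID (insn2);
     }

   A frontend would store such a function in the rank field of its
   sched_info instance and point current_sched_info at that instance.  */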
/* This structure holds a description of the properties for speculative
   scheduling.  */
struct spec_info_def
{
  /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
     BE_IN_{DATA|CONTROL}.  */
  int mask;

  /* A dump file for additional information on speculative scheduling.  */
  FILE *dump;

  /* Minimal cumulative weakness of a speculative instruction's
     dependencies, such that the insn will still be scheduled.  */
  dw_t weakness_cutoff;

  /* Flags from the enum SPEC_SCHED_FLAGS.  */
  int flags;
};
typedef struct spec_info_def *spec_info_t;

extern struct sched_info *current_sched_info;
/* Indexed by INSN_UID, the collection of all data associated with
   a single instruction.  */

struct haifa_insn_data
{
  /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
     h_i_d because when h_i_d is extended, the address of deps_list->first
     changes without updating deps_list->first->next->prev_nextp.  Thus
     BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and the
     FORW_DEPS list is allocated on the obstack.  */

  /* A list of backward dependencies.  The insn is a consumer of all the
     deps mentioned here.  */
  deps_list_t back_deps;

  /* A list of insns which depend on the instruction.  Unlike 'back_deps',
     it represents forward dependencies.  */
  deps_list_t forw_deps;

  /* A list of scheduled producers of the instruction.  Links are moved
     from 'back_deps' to 'resolved_back_deps' while scheduling.  */
  deps_list_t resolved_back_deps;

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* Number of instructions referring to this insn.  */
  int ref_count;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  /* INTER_TICK is used to adjust INSN_TICKs of instructions from the
     subsequent blocks in a region.  */
  int inter_tick;

  /* See comment on QUEUE_INDEX macro in haifa-sched.c.  */
  int queue_index;

  short cost;

  /* This weight is an estimate of the insn's contribution to
     register pressure.  */
  short reg_weight;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;

  /* Set if there's a DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  unsigned int is_load_insn : 1;

  /* Nonzero if priority has been computed already.  */
  unsigned int priority_known : 1;

  /* Nonzero if instruction has internal dependence
     (e.g. add_dependence was invoked with (insn == elem)).  */
  unsigned int has_internal_dep : 1;

  /* What speculations are necessary to apply to schedule the instruction.  */
  ds_t todo_spec;
  /* What speculations were already applied.  */
  ds_t done_spec;
  /* What speculations are checked by this instruction.  */
  ds_t check_spec;

  /* Recovery block for speculation checks.  */
  basic_block recovery_block;

  /* Original pattern of the instruction.  */
  rtx orig_pat;
};

extern struct haifa_insn_data *h_i_d;

/* Used only if (current_sched_info->flags & USE_GLAT) != 0.
   These regsets store global_live_at_{start, end} information
   for each basic block.  */
extern regset *glat_start, *glat_end;
/* Accessor macros for h_i_d.  There are more in haifa-sched.c and
   sched-rgn.c.  */
#define INSN_BACK_DEPS(INSN) (h_i_d[INSN_UID (INSN)].back_deps)
#define INSN_FORW_DEPS(INSN) (h_i_d[INSN_UID (INSN)].forw_deps)
#define INSN_RESOLVED_BACK_DEPS(INSN) \
  (h_i_d[INSN_UID (INSN)].resolved_back_deps)
#define INSN_LUID(INSN) (h_i_d[INSN_UID (INSN)].luid)
#define CANT_MOVE(insn) (h_i_d[INSN_UID (insn)].cant_move)
#define INSN_DEP_COUNT(INSN) (h_i_d[INSN_UID (INSN)].dep_count)
#define INSN_PRIORITY(INSN) (h_i_d[INSN_UID (INSN)].priority)
#define INSN_PRIORITY_KNOWN(INSN) (h_i_d[INSN_UID (INSN)].priority_known)
#define INSN_REG_WEIGHT(INSN) (h_i_d[INSN_UID (INSN)].reg_weight)
#define HAS_INTERNAL_DEP(INSN) (h_i_d[INSN_UID (INSN)].has_internal_dep)
#define TODO_SPEC(INSN) (h_i_d[INSN_UID (INSN)].todo_spec)
#define DONE_SPEC(INSN) (h_i_d[INSN_UID (INSN)].done_spec)
#define CHECK_SPEC(INSN) (h_i_d[INSN_UID (INSN)].check_spec)
#define RECOVERY_BLOCK(INSN) (h_i_d[INSN_UID (INSN)].recovery_block)
#define ORIG_PAT(INSN) (h_i_d[INSN_UID (INSN)].orig_pat)
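
/* Illustrative sketch: the macros above hide the h_i_d indexing, so per-insn
   data is normally read like this (INSN is assumed to be any insn in the
   current region; sched_dump is declared later in this file):

     if (INSN_PRIORITY_KNOWN (insn))
       fprintf (sched_dump, ";;   insn %d: luid %d, priority %d\n",
                INSN_UID (insn), INSN_LUID (insn), INSN_PRIORITY (insn));  */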
/* INSN is either a simple or a branchy speculation check.  */
#define IS_SPECULATION_CHECK_P(INSN) (RECOVERY_BLOCK (INSN) != NULL)

/* INSN is a speculation check that will simply reexecute the speculatively
   scheduled instruction if the speculation fails.  */
#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR)

/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
   speculation fails.  Insns in that block will reexecute the speculatively
   scheduled code and then will return immediately after INSN thus preserving
   semantics of the program.  */
#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) != NULL && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR)
/* The dep status (aka ds_t) of a link encapsulates the information needed
   for speculative scheduling.  Namely, it consists of 4 integers in the
   range [0, MAX_DEP_WEAK] and 3 bits.

   The integers correspond to the probability that the dependence does *not*
   exist, i.e. the probability that overcoming this dependence will not be
   followed by execution of the recovery code.  Nevertheless, however high
   the probability of success is, recovery code should still be generated to
   preserve the semantics of the program.  To get/set these integers, refer
   to the {get, set}_dep_weak () functions in sched-deps.c.

   The 3 bits in the DEP_STATUS correspond to the 3 dependence types: true,
   output and anti dependence.  It is not enough for speculative scheduling
   to know just the major type of all the dependences between two
   instructions, as only a true dependence can be overcome.

   There is also a 4th bit in the DEP_STATUS (HARD_DEP), which is reserved
   to describe the instruction's status.  It is set whenever the instruction
   has at least one dependence that cannot be overcome.
   See also: check_dep_status () in sched-deps.c.  */

/* We exclude the sign bit.  */
#define BITS_PER_DEP_STATUS (HOST_BITS_PER_INT - 1)

/* The first '4' stands for the 3 dep type bits and the HARD_DEP bit.
   The second '4' stands for the BEGIN_{DATA, CONTROL}, BE_IN_{DATA, CONTROL}
   dep weaknesses.  */
#define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 4) / 4)

/* Mask of speculative weakness in dep_status.  */
#define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)

/* This constant means that the dependence is fake with 99.999...%
   probability.  This is the maximum value that can appear in dep_status.
   Note that we don't want MAX_DEP_WEAK to be the same as DEP_WEAK_MASK, for
   debugging reasons.  Though it could be set to DEP_WEAK_MASK, and when done
   so, we'd get fast multiplication by / division by NO_DEP_WEAK.  */
#define MAX_DEP_WEAK (DEP_WEAK_MASK - 1)

/* This constant means that the dependence is 99.999...% real and it is a
   really bad idea to overcome it (though this can be done, preserving
   program semantics).  */
#define MIN_DEP_WEAK 1

/* This constant represents 100% probability.
   E.g. it is used to represent the weakness of a dependence that doesn't
   exist.  */
#define NO_DEP_WEAK (MAX_DEP_WEAK + MIN_DEP_WEAK)

/* Default weakness of a speculative dependence.  Used when we can say
   neither good nor bad about the dependence.  */
#define UNCERTAIN_DEP_WEAK (MAX_DEP_WEAK - MAX_DEP_WEAK / 4)
/* Offsets of the speculative weaknesses in dep_status.  */
enum SPEC_TYPES_OFFSETS {
  BEGIN_DATA_BITS_OFFSET = 0,
  BE_IN_DATA_BITS_OFFSET = BEGIN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BEGIN_CONTROL_BITS_OFFSET = BE_IN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BE_IN_CONTROL_BITS_OFFSET = BEGIN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK
};

/* The following defines provide numerous constants used to distinguish
   between different types of speculative dependencies.  */

/* The dependence can be overcome with generation of a new data-speculative
   instruction.  */
#define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)

/* This dependence is to the instruction in the recovery block that was
   formed to recover after a data-speculation failure.
   Thus, this dependence can be overcome by generating a copy of
   this instruction in the recovery block.  */
#define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)

/* The dependence can be overcome with generation of a new control-speculative
   instruction.  */
#define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)

/* This dependence is to the instruction in the recovery block that was
   formed to recover after a control-speculation failure.
   Thus, this dependence can be overcome by generating a copy of
   this instruction in the recovery block.  */
#define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)

/* A few convenient combinations.  */
#define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
#define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
#define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
#define SPECULATIVE (DATA_SPEC | CONTROL_SPEC)
#define BE_IN_SPEC (BE_IN_DATA | BE_IN_CONTROL)
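
/* Illustrative sketch of how the weakness fields are packed into a ds_t.
   The explicit shift/mask arithmetic below is shown only to make the layout
   concrete; in real code the get_dep_weak () / set_dep_weak () functions
   declared later in this file are used instead (DS is assumed to be some
   dependence status).

     dw_t w = (ds >> BEGIN_DATA_BITS_OFFSET) & DEP_WEAK_MASK;
     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);

   The first line extracts the BEGIN_DATA weakness; the second records that
   the data dependence is considered almost certainly fake.  */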
/* Constants that are helpful when iterating through dep_status.  */
#define FIRST_SPEC_TYPE BEGIN_DATA
#define LAST_SPEC_TYPE BE_IN_CONTROL
#define SPEC_TYPE_SHIFT BITS_PER_DEP_WEAK

/* A dependence on an instruction can be of multiple types
   (e.g. true and output).  These fields enhance the REG_NOTE_KIND
   information of the dependence.  */
#define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
#define DEP_OUTPUT (DEP_TRUE << 1)
#define DEP_ANTI (DEP_OUTPUT << 1)

#define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI)

/* Instruction has a non-speculative dependence.  This bit represents a
   property of an instruction - not one of a dependence.
   Therefore, it can appear only in the TODO_SPEC field of an instruction.  */
#define HARD_DEP (DEP_ANTI << 1)
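
/* Illustrative sketch: the type bits are ordinary flags inside a ds_t, so
   they can be tested directly.  DS is assumed to be the DEP_STATUS of some
   dependence.

     bool plain_true_dep_p = (ds & DEP_TYPES) == DEP_TRUE;
     bool speculative_p = (ds & SPECULATIVE) != 0;

   A status may have several type bits set at once, which is what DEP_TYPES
   groups together.  */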
/* This represents the results of calling sched-deps.c functions,
   which modify dependencies.  Possible choices are: a dependence
   is already present and nothing has been changed; a dependence type
   has been changed; a brand new dependence has been created.  */
enum DEPS_ADJUST_RESULT {
  DEP_PRESENT = 1,
  DEP_CHANGED = 2,
  DEP_CREATED = 3
};

/* Represents the bits that can be set in the flags field of the
   sched_info structure.  */
enum SCHED_FLAGS {
  /* If set, generate links between instructions as DEPS_LISTs.
     Otherwise, generate usual INSN_LIST links.  */
  USE_DEPS_LIST = 1,
  /* Perform data or control (or both) speculation.
     Results in generation of data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  */
  DO_SPECULATION = USE_DEPS_LIST << 1,
  SCHED_RGN = DO_SPECULATION << 1,
  SCHED_EBB = SCHED_RGN << 1,
  /* Detach register live information from basic block headers.
     This is necessary to invoke functions that change the CFG
     (e.g. split_edge).  Requires USE_GLAT.  */
  DETACH_LIFE_INFO = SCHED_EBB << 1,
  /* Save register live information from basic block headers to
     glat_{start, end} arrays.  */
  USE_GLAT = DETACH_LIFE_INFO << 1
};
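
/* Illustrative sketch: the values above are bit flags tested against
   current_sched_info->flags, e.g.

     if (current_sched_info->flags & DO_SPECULATION)
       ...

   (per the comment on DO_SPECULATION, a frontend setting that flag must
   also set USE_DEPS_LIST).  */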
enum SPEC_SCHED_FLAGS {
  COUNT_SPEC_IN_CRITICAL_PATH = 1,
  PREFER_NON_DATA_SPEC = COUNT_SPEC_IN_CRITICAL_PATH << 1,
  PREFER_NON_CONTROL_SPEC = PREFER_NON_DATA_SPEC << 1
};

#define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_LINE_NUMBER (NOTE) \
                             != NOTE_INSN_BASIC_BLOCK))

extern FILE *sched_dump;
extern int sched_verbose;

/* Exception Free Loads:

   We define five classes of speculative loads: IFREE, IRISKY,
   PFREE, PRISKY, and MFREE.

   IFREE loads are loads that are proved to be exception-free, just
   by examining the load insn.  Examples for such loads are loads
   from TOC and loads of global data.

   IRISKY loads are loads that are proved to be exception-risky,
   just by examining the load insn.  Examples for such loads are
   volatile loads and loads from shared memory.

   PFREE loads are loads for which we can prove, by examining other
   insns, that they are exception-free.  Currently, this class consists
   of loads for which we are able to find a "similar load", either in
   the target block, or, if only one split-block exists, in that split
   block.  Load2 is similar to load1 if both have the same single base
   register.  We identify only part of the similar loads, by finding
   an insn upon which both load1 and load2 have a DEF-USE dependence.

   PRISKY loads are loads for which we can prove, by examining other
   insns, that they are exception-risky.  Currently we have two proofs for
   such loads.  The first proof detects loads that are probably guarded by a
   test on the memory address.  This proof is based on the
   backward and forward data dependence information for the region.
   Let load-insn be the examined load.
   Load-insn is PRISKY iff ALL the following hold:

   - insn1 is not in the same block as load-insn
   - there is a DEF-USE dependence chain (insn1, ..., load-insn)
   - test-insn is either a compare or a branch, not in the same block
     as load-insn
   - load-insn is reachable from test-insn
   - there is a DEF-USE dependence chain (insn1, ..., test-insn)

   This proof might fail when the compare and the load are fed
   by an insn not in the region.  To solve this, we will add to this
   group all loads that have no input DEF-USE dependence.

   The second proof detects loads that are directly or indirectly
   fed by a speculative load.  This proof is affected by the
   scheduling process.  We will use the flag fed_by_spec_load.
   Initially, all insns have this flag reset.  After a speculative
   motion of an insn, if insn is either a load, or marked as
   fed_by_spec_load, we will also mark as fed_by_spec_load every
   insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
   load which is fed_by_spec_load is also PRISKY.

   MFREE (maybe-free) loads are all the remaining loads.  They may be
   exception-free, but we cannot prove it.

   Now, all loads in the IFREE and PFREE classes are considered
   exception-free, while all loads in the IRISKY and PRISKY classes are
   considered exception-risky.  As for loads in the MFREE class,
   these are considered either exception-free or exception-risky,
   depending on whether we are pessimistic or optimistic.  We have
   to take the pessimistic approach to assure the safety of
   speculative scheduling, but we can take the optimistic approach
   by invoking the -fsched-spec-load-dangerous option.  */

enum INSN_TRAP_CLASS
{
  TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
  PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
};

#define WORST_CLASS(class1, class2) \
  ((class1 > class2) ? class1 : class2)
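
/* Illustrative sketch: WORST_CLASS picks the riskier of two classifications,
   so the joint trap class of two insns can be computed as

     int joint = WORST_CLASS (haifa_classify_insn (insn1),
                              haifa_classify_insn (insn2));

   haifa_classify_insn () is declared below; INSN1 and INSN2 are assumptions
   of this example.  */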
#ifndef __GNUC__
#define __inline
#endif

#ifndef HAIFA_INLINE
#define HAIFA_INLINE __inline
#endif

/* Functions in sched-vis.c.  */
extern void print_insn (char *, rtx, int);

/* Functions in sched-deps.c.  */
extern bool sched_insns_conditions_mutex_p (rtx, rtx);
extern void add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void add_forw_dep (dep_link_t);
extern void compute_forward_dependences (rtx, rtx);
extern void init_dependency_caches (int);
extern void free_dependency_caches (void);
extern void extend_dependency_caches (int, bool);
extern enum DEPS_ADJUST_RESULT add_or_update_back_dep (rtx, rtx,
                                                       enum reg_note, ds_t);
extern void add_or_update_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
extern void add_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
extern void delete_back_forw_dep (dep_link_t);
extern dw_t get_dep_weak (ds_t, ds_t);
extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
extern ds_t ds_merge (ds_t, ds_t);
/* Functions in haifa-sched.c.  */
extern int haifa_classify_insn (rtx);
extern void get_ebb_head_tail (basic_block, basic_block, rtx *, rtx *);
extern int no_real_insns_p (rtx, rtx);

extern void rm_other_notes (rtx, rtx);

extern int insn_cost (rtx);
extern int dep_cost (dep_t);
extern int set_priorities (rtx, rtx);

extern void schedule_block (basic_block *, int);
extern void sched_init (void);
extern void sched_finish (void);

extern int try_ready (rtx);
extern void * xrecalloc (void *, size_t, size_t, size_t);
extern void unlink_bb_notes (basic_block, basic_block);
extern void add_block (basic_block, basic_block);
extern void attach_life_info (void);
extern rtx bb_note (basic_block);
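
/* Illustrative sketch of how the head/tail interface above is commonly used
   (a sketch only; BB is assumed to be a basic_block of interest):

     rtx head, tail, insn;

     get_ebb_head_tail (bb, bb, &head, &tail);
     if (!no_real_insns_p (head, tail))
       for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
         if (INSN_P (insn))
           ... examine INSN_PRIORITY (insn), INSN_BACK_DEPS (insn), etc.  */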
#ifdef ENABLE_CHECKING
extern void check_reg_live (bool);
#endif

#endif /* GCC_SCHED_INT_H */