/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   communications of the acm, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"
/* Propagate flow information through back edges and thus enable PRE's
   moving loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an incoming edge.

   5) Perform store motion.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).
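   As an illustration (an example of ours, not from the original sources),
   consider the source-level fragment

	if (p)
	  x = a + b;
	y = a + b;	<- partially redundant: recomputed on every path,
			   though already available when p is true

   PRE inserts the computation on the path where it was missing, into a new
   pseudo T:

	if (p)
	  t = a + b, x = t;
	else
	  t = a + b;
	y = t;		<- now fully redundant; replaced by a copy from t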
   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.
   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes is even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.
   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
/* GCSE global vars.  */

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
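/* An illustrative example (ours, not from the original file): within one
   basic block

	insn 1:  r103 = r100 + r101	first occurrence, operands unmodified
					before it => anticipatable occurrence
	insn 2:  r101 = ...
	insn 3:  r104 = r100 + r101	last occurrence, operands unmodified
					through the block end => available
					occurrence

   so a single expression can contribute both an antic_occr and an
   avail_occr for the same block.  */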
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether the table is expression or copy propagation one.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table_d set_hash_table;
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself. (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */
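/* Illustrative sketch (ours, not from the original sources): for a loop
   whose body contains only

	r100 = MEM[sym]		(load)
	...
	MEM[sym] = r100		(store of the same register)

   the MEM is killed by nothing but itself, so load motion may hoist the
   load above the loop and store motion may sink the store below it, with
   r100 (the reaching reg) carrying the value in between.  */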
struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};
/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;
/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* For available exprs */
static sbitmap *ae_kill;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
				  struct hash_table_d *);
static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *, int);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static struct expr *lookup_set (unsigned int, struct hash_table_d *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_transpout (void);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block, int);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);
#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
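/* Usage sketch for the wrappers above (illustrative, not from the
   original file):

     struct expr *e = GOBNEW (struct expr);	    node on gcse_obstack,
						    released in bulk
     rtx *vec = GCNEWVEC (rtx, last_basic_block);   zeroed heap vector,
						    released with free ()

   All of these funnel through gmalloc/gcalloc/gcse_alloc below, so that
   bytes_used keeps a rough tally of a pass's memory footprint.  */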
/* Misc. utilities.  */

/* Nonzero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy[(int) NUM_MACHINE_MODES];
/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}
/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  static bool can_copy_init_p = false;

  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
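/* Usage sketch (illustrative, not from the original file): code motion
   guards copy creation on the mode of the value, e.g.

     if (can_copy_p (GET_MODE (dest)))
       ... create a new pseudo and a (set (reg) (reg)) copy of DEST ...

   so that CCmode values are never copied on targets where such copies
   are not supported.  */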
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_ALLOC (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */
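/* A worked mini-example (ours, not from the original sources): for a block

	insn 1:  r104 = r100 + r101
	insn 2:  r101 = ...

   the expression (plus r100 r101) is locally anticipatable (computed first,
   before any operand changes), but it is not locally available at the block
   end because insn 2 clobbers r101, and the block is not transparent for it
   for the same reason.  */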
static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
	sbitmap_vector_zero (transp, last_basic_block);
      else
	sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, table->set_p);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      return can_assign_to_reg_without_clobbers_p (x);
    }
}
/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
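#if 0
/* Standalone sketch (ours, illustrative only) of the "turn the last
   recursive call into iteration" idiom used by oprs_unchanged_p above
   and oprs_not_set_p below: recurse on every operand except operand 0,
   then make the call on operand 0 a tail call so the compiler can turn
   it into a loop.  */

struct toy_node
{
  int value;
  int n_kids;
  struct toy_node *kid[4];
};

static int
toy_all_leaves_positive (const struct toy_node *x)
{
  int i;

  for (i = x->n_kids - 1; i >= 0; i--)
    {
      /* Last recursive call needed at this level: make it a tail call.  */
      if (i == 0)
	return toy_all_leaves_positive (x->kid[0]);
      else if (! toy_all_leaves_positive (x->kid[i]))
	return 0;
    }

  /* A node without kids is a leaf; test it directly.  */
  return x->value > 0;
}
#endif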
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static const_rtx gcse_mem_operand;
/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;

      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
		   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}
/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}
/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
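#if 0
/* Minimal standalone sketch (ours, illustrative only) of the chained-bucket
   scheme used by insert_expr_in_table above, with an int key standing in
   for the rtx expression.  None of these names exist in GCC.  */

#include <stdlib.h>

struct toy_expr
{
  int key;			/* stands in for the rtx expression */
  unsigned int bitmap_index;	/* dense index, assigned on first insert */
  struct toy_expr *next_same_hash;
};

static struct toy_expr *toy_table[61];
static unsigned int toy_n_elems;

static struct toy_expr *
toy_insert (int key)
{
  unsigned int hash = (unsigned int) key % 61;
  struct toy_expr *cur, *last = NULL;

  /* Walk the chain looking for an equivalent entry.  */
  for (cur = toy_table[hash]; cur; last = cur, cur = cur->next_same_hash)
    if (cur->key == key)
      return cur;

  /* Not found: append a new entry and give it the next dense index,
     just as insert_expr_in_table numbers bitmap_index.  */
  cur = (struct toy_expr *) calloc (1, sizeof (*cur));
  cur->key = key;
  cur->bitmap_index = toy_n_elems++;
  if (last)
    last->next_same_hash = cur;
  else
    toy_table[hash] = cur;
  return cur;
}
#endif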
/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}
/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && CONST_INT_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1)))
    return true;

  /* Consider a COMPARE of the same registers is a constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}
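/* Illustrative examples (ours, not from the original file):

     (const_int 42)				constant
     (compare (const_int 1) (const_int 2))	treated as constant
     (compare (reg 100) (reg 100))		constant for non-float modes
						only; for float modes x == x
						can be false (NaN), so it is
						rejected
     (reg 100)					not a constant  */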
/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0))))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point, only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p,
				avail_p, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || (tmp = next_nonnote_insn (insn)) == NULL_RTX
		   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
		   || oprs_available_p (pat, tmp)))
	insert_set_in_table (pat, insn, table);
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point, only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, table);
	}
    }
}
static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}
/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If SET_P is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}
static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d)\n  ",
		 expr->bitmap_index, hash_val[i]);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_NUM (insn);

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_NUM (insn);

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}
/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (! INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
		  record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
	  && implicit_sets[current_bb->index] != NULL_RTX)
	hash_scan_set (implicit_sets[current_bb->index],
		       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}
/* Allocate space for the set/expr hash TABLE.
   The size is based on the current number of insns, which is used to
   determine the number of buckets to use.  SET_P determines whether
   set or expression table will be created.  */

static void
alloc_hash_table (struct hash_table_d *table, int set_p)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
  table->set_p = set_p;
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}
/* Compute the expression or copy propagation hash TABLE.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table_d *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}
/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
	free_EXPR_LIST_node (list);
      else
	free_INSN_LIST_node (list);
    }

  *listp = 0;
}
/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}
/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
/* Reset tables used to keep track of what's still available [since the
   start of the block].  */

static void
reset_opr_set_tables (void)
{
  /* Maintain a bitmap of which regs have been set since beginning of
     the block.  */
  CLEAR_REG_SET (reg_set_bitmap);

  /* Also keep a record of the last instruction to modify memory.
     For now this is very trivial, we only record whether any memory
     location has been modified.  */
  clear_modify_mem_tables ();
}
/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (const_rtx x, const_rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
				  DF_INSN_LUID (insn), x, 0))
	return 0;
      else
	return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    return oprs_not_set_p (XEXP (x, i), insn);

	  if (! oprs_not_set_p (XEXP (x, i), insn))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
	    return 0;
    }

  return 1;
}
/* Mark things set by a CALL.  */

static void
mark_call (rtx insn)
{
  if (! RTL_CONST_OR_PURE_CALL_P (insn))
    record_last_mem_set_info (insn);
}
/* Mark things set by a SET.  */

static void
mark_set (rtx pat, rtx insn)
{
  rtx dest = SET_DEST (pat);

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  if (REG_P (dest))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
  else if (MEM_P (dest))
    record_last_mem_set_info (insn);

  if (GET_CODE (SET_SRC (pat)) == CALL)
    mark_call (insn);
}
/* Record things set by a CLOBBER.  */

static void
mark_clobber (rtx pat, rtx insn)
{
  rtx clob = XEXP (pat, 0);

  while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
    clob = XEXP (clob, 0);

  if (REG_P (clob))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
  else
    record_last_mem_set_info (insn);
}
1971 /* Record things set by INSN.
1972 This data is used by oprs_not_set_p. */
1975 mark_oprs_set (rtx insn
)
1977 rtx pat
= PATTERN (insn
);
1980 if (GET_CODE (pat
) == SET
)
1981 mark_set (pat
, insn
);
1982 else if (GET_CODE (pat
) == PARALLEL
)
1983 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
1985 rtx x
= XVECEXP (pat
, 0, i
);
1987 if (GET_CODE (x
) == SET
)
1989 else if (GET_CODE (x
) == CLOBBER
)
1990 mark_clobber (x
, insn
);
1991 else if (GET_CODE (x
) == CALL
)
1995 else if (GET_CODE (pat
) == CLOBBER
)
1996 mark_clobber (pat
, insn
);
1997 else if (GET_CODE (pat
) == CALL
)
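/* For illustration only (register numbers hypothetical): a typical
   PARALLEL handled above pairs a SET with a CLOBBER of the flags
   register, e.g.

     (parallel [(set (reg 60) (plus (reg 61) (reg 62)))
                (clobber (reg:CC 17))])

   mark_oprs_set records reg 60 via mark_set and the clobbered flags
   register via mark_clobber, so oprs_not_set_p sees both as modified
   from this point to the end of the block.  */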
/* Compute copy/constant propagation working variables.  */

/* Local properties of assignments.  */
static sbitmap *cprop_pavloc;
static sbitmap *cprop_absaltered;

/* Global properties of assignments (computed from the local properties).  */
static sbitmap *cprop_avin;
static sbitmap *cprop_avout;

/* Allocate vars used for copy/const propagation.  N_BLOCKS is the number of
   basic blocks.  N_SETS is the number of sets.  */

static void
alloc_cprop_mem (int n_blocks, int n_sets)
{
  cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);

  cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
}

/* Free vars used by copy/const propagation.  */

static void
free_cprop_mem (void)
{
  sbitmap_vector_free (cprop_pavloc);
  sbitmap_vector_free (cprop_absaltered);
  sbitmap_vector_free (cprop_avin);
  sbitmap_vector_free (cprop_avout);
}

/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
   bit in BMAP.  */

static void
compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        df_ref def;

        if (set_p)
          {
            for (def = DF_REG_DEF_CHAIN (REGNO (x));
                 def;
                 def = DF_REF_NEXT_REG (def))
              SET_BIT (bmap[DF_REF_BB (def)->index], indx);
          }
        else
          {
            for (def = DF_REG_DEF_CHAIN (REGNO (x));
                 def;
                 def = DF_REF_NEXT_REG (def))
              RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
          }
      }
      return;

    case MEM:
      if (! MEM_READONLY_P (x))
        {
          bitmap_iterator bi;
          unsigned bb_index;

          /* First handle all the blocks with calls.  We don't need to
             do any list walking for them.  */
          EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
            {
              if (set_p)
                SET_BIT (bmap[bb_index], indx);
              else
                RESET_BIT (bmap[bb_index], indx);
            }

          /* Now iterate over the blocks which have memory modifications
             but which do not have any calls.  */
          EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
                                          blocks_with_calls,
                                          0, bb_index, bi)
            {
              rtx list_entry = canon_modify_mem_list[bb_index];

              while (list_entry)
                {
                  rtx dest, dest_addr;

                  /* LIST_ENTRY must be an INSN of some kind that sets memory.
                     Examine each hunk of memory that is modified.  */

                  dest = XEXP (list_entry, 0);
                  list_entry = XEXP (list_entry, 1);
                  dest_addr = XEXP (list_entry, 0);

                  if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
                                             x, NULL_RTX, rtx_addr_varies_p))
                    {
                      if (set_p)
                        SET_BIT (bmap[bb_index], indx);
                      else
                        RESET_BIT (bmap[bb_index], indx);
                      break;
                    }
                  list_entry = XEXP (list_entry, 1);
                }
            }
        }

      x = XEXP (x, 0);
      goto repeat;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, i);
              goto repeat;
            }

          compute_transp (XEXP (x, i), indx, bmap, set_p);
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
    }
}
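/* For illustration only (register number hypothetical): given an
   expression such as (plus (reg 60) (const_int 4)) with bitmap index
   INDX, compute_transp clears the INDX bit of transp[B] for every
   block B containing a definition of reg 60.  For a MEM expression,
   the bit is cleared in every block with a call and in every block
   whose recorded stores may alias the MEM.  */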
/* Top level routine to do the dataflow analysis needed by copy/const
   propagation.  */

static void
compute_cprop_data (void)
{
  compute_local_properties (cprop_absaltered, cprop_pavloc, NULL,
                            &set_hash_table);
  compute_available (cprop_pavloc, cprop_absaltered,
                     cprop_avout, cprop_avin);
}

/* Copy/constant propagation.  */

/* Maximum number of register uses in an insn that we handle.  */
#define MAX_USES 8

/* Table of uses found in an insn.
   Allocated statically to avoid alloc/free complexity and overhead.  */
static struct reg_use reg_use_table[MAX_USES];

/* Index into `reg_use_table' while building it.  */
static int reg_use_count;

/* Set up a list of register numbers used in INSN.  The found uses are stored
   in `reg_use_table'.  `reg_use_count' is initialized to zero before entry,
   and contains the number of uses in the table upon exit.

   ??? If a register appears multiple times we will record it multiple times.
   This doesn't hurt anything but it will slow things down.  */

static void
find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  rtx x = *xptr;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:
  if (x == 0)
    return;

  code = GET_CODE (x);
  if (REG_P (x))
    {
      if (reg_use_count == MAX_USES)
        return;

      reg_use_table[reg_use_count].reg_rtx = x;
      reg_use_count++;
    }

  /* Recursively scan the operands of this expression.  */

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }

          find_used_regs (&XEXP (x, i), data);
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          find_used_regs (&XVECEXP (x, i, j), data);
    }
}
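/* For illustration only: cprop_insn below collects the uses of an insn
   with

     reg_use_count = 0;
     note_uses (&PATTERN (insn), find_used_regs, NULL);

   and then iterates over reg_use_table[0 .. reg_use_count - 1].  Uses
   beyond MAX_USES are simply dropped, which is safe because missing a
   use only means missing a propagation opportunity.  */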
/* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
   Returns nonzero if successful.  */

static int
try_replace_reg (rtx from, rtx to, rtx insn)
{
  rtx note = find_reg_equal_equiv_note (insn);
  rtx src = 0;
  int success = 0;
  rtx set = single_set (insn);

  /* Usually we substitute easy stuff, so we won't copy everything.
     We however need to take care to not duplicate non-trivial CONST
     expressions.  */
  to = copy_rtx (to);

  validate_replace_src_group (from, to, insn);
  if (num_changes_pending () && apply_change_group ())
    success = 1;

  /* Try to simplify SET_SRC if we have substituted a constant.  */
  if (success && set && CONSTANT_P (to))
    {
      src = simplify_rtx (SET_SRC (set));

      if (src)
        validate_change (insn, &SET_SRC (set), src, 0);
    }

  /* If there is already a REG_EQUAL note, update the expression in it
     with our replacement.  */
  if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
    set_unique_reg_note (insn, REG_EQUAL,
                         simplify_replace_rtx (XEXP (note, 0), from, to));
  if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
    {
      /* If above failed and this is a single set, try to simplify the source
         of the set given our substitution.  We could perhaps try this for
         multiple SETs, but it probably won't buy us anything.  */
      src = simplify_replace_rtx (SET_SRC (set), from, to);

      if (!rtx_equal_p (src, SET_SRC (set))
          && validate_change (insn, &SET_SRC (set), src, 0))
        success = 1;
    }

  /* If we've failed to do replacement, have a single SET, don't already
     have a note, and have no special SET, add a REG_EQUAL note to not
     lose information.  */
  if (!success && note == 0 && set != 0
      && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
    note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));

  /* REG_EQUAL may get simplified into register.
     We don't allow that.  Remove that note.  This code ought
     not to happen, because previous code ought to synthesize
     a reg-reg move, but be on the safe side.  */
  if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
    remove_note (insn, note);

  return success;
}
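/* For illustration only (register numbers hypothetical): substituting
   (const_int 4) for reg 61 in

     (set (reg 60) (plus (reg 61) (const_int 3)))

   lets simplify_rtx fold the source to (const_int 7).  When the
   substitution cannot be validated (e.g. the constant is not a legal
   operand for the insn), the folded result is preserved instead as a
   REG_EQUAL note so the information is not lost.  */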
/* Find a set of REGNOs that are available on entry to INSN's block.  Returns
   NULL if no such set is found.  */

static struct expr *
find_avail_set (int regno, rtx insn)
{
  /* SET1 contains the last set found that can be returned to the caller for
     use in a substitution.  */
  struct expr *set1 = 0;

  /* Loops are not possible here.  To get a loop we would need two sets
     available at the start of the block containing INSN.  i.e. we would
     need two sets like this available at the start of the block:

       (set (reg X) (reg Y))
       (set (reg Y) (reg X))

     This can not happen since the set of (reg Y) would have killed the
     set of (reg X) making it unavailable at the start of this block.  */
  while (1)
    {
      rtx src;
      struct expr *set = lookup_set (regno, &set_hash_table);

      /* Find a set that is available at the start of the block
         which contains INSN.  */
      while (set)
        {
          if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
            break;
          set = next_set (regno, set);
        }

      /* If no available set was found we've reached the end of the
         (possibly empty) copy chain.  */
      if (set == 0)
        break;

      gcc_assert (GET_CODE (set->expr) == SET);

      src = SET_SRC (set->expr);

      /* We know the set is available.
         Now check that SRC is ANTLOC (i.e. none of the source operands
         have changed since the start of the block).

         If the source operand changed, we may still use it for the next
         iteration of this loop, but we may not use it for substitutions.  */

      if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
        set1 = set;

      /* If the source of the set is anything except a register, then
         we have reached the end of the copy chain.  */
      if (! REG_P (src))
        break;

      /* Follow the copy chain, i.e. start another iteration of the loop
         and see if we have an available copy into SRC.  */
      regno = REGNO (src);
    }

  /* SET1 holds the last set that was available and anticipatable at
     INSN.  */
  return set1;
}
/* Subroutine of cprop_insn that tries to propagate constants into
   JUMP_INSNS.  JUMP must be a conditional jump.  If SETCC is non-NULL
   it is the instruction that immediately precedes JUMP, and must be a
   single SET of a register.  FROM is what we will try to replace,
   SRC is the constant we will try to substitute for it.  Returns nonzero
   if a change was made.  */

static int
cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
{
  rtx new_rtx, set_src, note_src;
  rtx set = pc_set (jump);
  rtx note = find_reg_equal_equiv_note (jump);

  if (note)
    {
      note_src = XEXP (note, 0);
      if (GET_CODE (note_src) == EXPR_LIST)
        note_src = NULL_RTX;
    }
  else
    note_src = NULL_RTX;

  /* Prefer REG_EQUAL notes except those containing EXPR_LISTs.  */
  set_src = note_src ? note_src : SET_SRC (set);

  /* First substitute the SETCC condition into the JUMP instruction,
     then substitute that given values into this expanded JUMP.  */
  if (setcc != NULL_RTX
      && !modified_between_p (from, setcc, jump)
      && !modified_between_p (src, setcc, jump))
    {
      rtx setcc_src;
      rtx setcc_set = single_set (setcc);
      rtx setcc_note = find_reg_equal_equiv_note (setcc);
      setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
                  ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
      set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
                                      setcc_src);
    }
  else
    setcc = NULL_RTX;

  new_rtx = simplify_replace_rtx (set_src, from, src);

  /* If no simplification can be made, then try the next register.  */
  if (rtx_equal_p (new_rtx, SET_SRC (set)))
    return 0;

  /* If this is now a no-op delete it, otherwise this must be a valid insn.  */
  if (new_rtx == pc_rtx)
    delete_insn (jump);
  else
    {
      /* Ensure the value computed inside the jump insn is equivalent
         to the one computed by setcc.  */
      if (setcc && modified_in_p (new_rtx, setcc))
        return 0;
      if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
        {
          /* When (some) constants are not valid in a comparison, and there
             are two registers to be replaced by constants before the entire
             comparison can be folded into a constant, we need to keep
             intermediate information in REG_EQUAL notes.  For targets with
             separate compare insns, such notes are added by try_replace_reg.
             When we have a combined compare-and-branch instruction, however,
             we need to attach a note to the branch itself to make this
             optimization work.  */

          if (!rtx_equal_p (new_rtx, note_src))
            set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
          return 0;
        }

      /* Remove REG_EQUAL note after simplification.  */
      if (note_src)
        remove_note (jump, note);
    }

#ifdef HAVE_cc0
  /* Delete the cc0 setter.  */
  if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
    delete_insn (setcc);
#endif

  global_const_prop_count++;
  if (dump_file != NULL)
    {
      fprintf (dump_file,
               "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
               REGNO (from), INSN_UID (jump));
      print_rtl (dump_file, src);
      fprintf (dump_file, "\n");
    }
  purge_dead_edges (bb);

  /* If a conditional jump has been changed into unconditional jump, remove
     the jump and make the edge fallthru - this is always called in
     cfglayout mode.  */
  if (new_rtx != pc_rtx && simplejump_p (jump))
    {
      edge e;
      edge_iterator ei;

      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
        if (e->dest != EXIT_BLOCK_PTR
            && BB_HEAD (e->dest) == JUMP_LABEL (jump))
          {
            e->flags |= EDGE_FALLTHRU;
            break;
          }
      delete_insn (jump);
    }

  return 1;
}
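/* For illustration only (register number and label hypothetical): if
   reg 60 is known to be (const_int 2), then

     (set (pc) (if_then_else (eq (reg 60) (const_int 2))
                             (label_ref 23) (pc)))

   simplifies to (set (pc) (label_ref 23)), an unconditional jump, and
   purge_dead_edges removes the untaken edge; if it instead simplifies
   to (pc), the jump is a no-op and is deleted outright.  */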
static int
constprop_register (rtx insn, rtx from, rtx to)
{
  rtx sset;

  /* Check for reg or cc0 setting instructions followed by
     conditional branch instructions first.  */
  if ((sset = single_set (insn)) != NULL
      && NEXT_INSN (insn)
      && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
    {
      rtx dest = SET_DEST (sset);
      if ((REG_P (dest) || CC0_P (dest))
          && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
        return 1;
    }

  /* Handle normal insns next.  */
  if (NONJUMP_INSN_P (insn)
      && try_replace_reg (from, to, insn))
    return 1;

  /* Try to propagate a CONST_INT into a conditional jump.
     We're pretty specific about what we will handle in this
     code, we can extend this as necessary over time.

     Right now the insn in question must look like
     (set (pc) (if_then_else ...))  */
  else if (any_condjump_p (insn) && onlyjump_p (insn))
    return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
  return 0;
}

/* Perform constant and copy propagation on INSN.
   The result is nonzero if a change was made.  */

static int
cprop_insn (rtx insn)
{
  struct reg_use *reg_used;
  int changed = 0;
  rtx note;

  if (!INSN_P (insn))
    return 0;

  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);

  note = find_reg_equal_equiv_note (insn);

  /* We may win even when propagating constants into notes.  */
  if (note)
    find_used_regs (&XEXP (note, 0), NULL);

  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
       reg_used++, reg_use_count--)
    {
      unsigned int regno = REGNO (reg_used->reg_rtx);
      rtx pat, src;
      struct expr *set;

      /* If the register has already been set in this block, there's
         nothing we can do.  */
      if (! oprs_not_set_p (reg_used->reg_rtx, insn))
        continue;

      /* Find an assignment that sets reg_used and is available
         at the start of the block.  */
      set = find_avail_set (regno, insn);
      if (! set)
        continue;

      pat = set->expr;
      /* ??? We might be able to handle PARALLELs.  Later.  */
      gcc_assert (GET_CODE (pat) == SET);

      src = SET_SRC (pat);

      /* Constant propagation.  */
      if (gcse_constant_p (src))
        {
          if (constprop_register (insn, reg_used->reg_rtx, src))
            {
              changed = 1;
              global_const_prop_count++;
              if (dump_file != NULL)
                {
                  fprintf (dump_file,
                           "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
                  fprintf (dump_file, "insn %d with constant ",
                           INSN_UID (insn));
                  print_rtl (dump_file, src);
                  fprintf (dump_file, "\n");
                }
              if (INSN_DELETED_P (insn))
                return 1;
            }
        }
      else if (REG_P (src)
               && REGNO (src) >= FIRST_PSEUDO_REGISTER
               && REGNO (src) != regno)
        {
          if (try_replace_reg (reg_used->reg_rtx, src, insn))
            {
              changed = 1;
              global_copy_prop_count++;
              if (dump_file != NULL)
                {
                  fprintf (dump_file,
                           "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
                           regno, INSN_UID (insn));
                  fprintf (dump_file, " with reg %d\n", REGNO (src));
                }

              /* The original insn setting reg_used may or may not now be
                 deletable.  We leave the deletion to flow.  */
              /* FIXME: If it turns out that the insn isn't deletable,
                 then we may have unnecessarily extended register lifetimes
                 and made things worse.  */
            }
        }
    }

  if (changed && DEBUG_INSN_P (insn))
    return 0;

  return changed;
}
/* Like find_used_regs, but avoid recording uses that appear in
   input-output contexts such as zero_extract or pre_dec.  This
   restricts the cases we consider to those for which local cprop
   can legitimately make replacements.  */

static void
local_cprop_find_used_regs (rtx *xptr, void *data)
{
  rtx x = *xptr;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case STRICT_LOW_PART:
      return;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Can only legitimately appear this early in the context of
         stack pushes for function arguments, but handle all of the
         codes nonetheless.  */
      return;

    case SUBREG:
      /* Setting a subreg of a register larger than word_mode leaves
         the non-written words unchanged.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
        return;
      break;

    default:
      break;
    }

  find_used_regs (xptr, data);
}

/* Try to perform local const/copy propagation on X in INSN.  */

static bool
do_local_cprop (rtx x, rtx insn)
{
  rtx newreg = NULL, newcnst = NULL;

  /* Rule out USE instructions and ASM statements as we don't want to
     change the hard registers mentioned.  */
  if (REG_P (x)
      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
          || (GET_CODE (PATTERN (insn)) != USE
              && asm_noperands (PATTERN (insn)) < 0)))
    {
      cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
      struct elt_loc_list *l;

      if (!val)
        return false;
      for (l = val->locs; l; l = l->next)
        {
          rtx this_rtx = l->loc;
          rtx note;

          if (gcse_constant_p (this_rtx))
            newcnst = this_rtx;
          if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
              /* Don't copy propagate if it has attached REG_EQUIV note.
                 At this point only function parameters should have
                 REG_EQUIV notes and if the argument slot is used somewhere
                 explicitly, it means address of parameter has been taken,
                 so we should not extend the lifetime of the pseudo.  */
              && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
                  || ! MEM_P (XEXP (note, 0))))
            newreg = this_rtx;
        }
      if (newcnst && constprop_register (insn, x, newcnst))
        {
          if (dump_file != NULL)
            {
              fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
                       REGNO (x));
              fprintf (dump_file, "insn %d with constant ",
                       INSN_UID (insn));
              print_rtl (dump_file, newcnst);
              fprintf (dump_file, "\n");
            }
          local_const_prop_count++;
          return true;
        }
      else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
        {
          if (dump_file != NULL)
            {
              fprintf (dump_file,
                       "LOCAL COPY-PROP: Replacing reg %d in insn %d",
                       REGNO (x), INSN_UID (insn));
              fprintf (dump_file, " with reg %d\n", REGNO (newreg));
            }
          local_copy_prop_count++;
          return true;
        }
    }
  return false;
}

/* Do local const/copy propagation (i.e. within each basic block).  */

static bool
local_cprop_pass (void)
{
  basic_block bb;
  rtx insn;
  struct reg_use *reg_used;
  bool changed = false;

  cselib_init (false);
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (INSN_P (insn))
            {
              rtx note = find_reg_equal_equiv_note (insn);
              do
                {
                  reg_use_count = 0;
                  note_uses (&PATTERN (insn), local_cprop_find_used_regs,
                             NULL);
                  if (note)
                    local_cprop_find_used_regs (&XEXP (note, 0), NULL);

                  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
                       reg_used++, reg_use_count--)
                    if (do_local_cprop (reg_used->reg_rtx, insn))
                      {
                        changed = true;
                        break;
                      }
                  if (INSN_DELETED_P (insn))
                    break;
                }
              while (reg_use_count);
            }
          cselib_process_insn (insn);
        }

      /* Forget everything at the end of a basic block.  */
      cselib_clear_table ();
    }

  cselib_finish ();

  return changed;
}
/* Similar to get_condition, only the resulting condition must be
   valid at JUMP, instead of at EARLIEST.

   This differs from noce_get_condition in ifcvt.c in that we prefer not to
   settle for the condition variable in the jump instruction being integral.
   We prefer to be able to record the value of a user variable, rather than
   the value of a temporary used in a condition.  This could be solved by
   recording the value of *every* register scanned by canonicalize_condition,
   but this would require some code reorganization.  */

rtx
fis_get_condition (rtx jump)
{
  return get_condition (jump, NULL, false, true);
}

/* Check the comparison COND to see if we can safely form an implicit set from
   it.  COND is either an EQ or NE comparison.  */

static bool
implicit_set_cond_p (const_rtx cond)
{
  const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
  const_rtx cst = XEXP (cond, 1);

  /* We can't perform this optimization if either operand might be or might
     contain a signed zero.  */
  if (HONOR_SIGNED_ZEROS (mode))
    {
      /* It is sufficient to check if CST is or contains a zero.  We must
         handle float, complex, and vector.  If any subpart is a zero, then
         the optimization can't be performed.  */
      /* ??? The complex and vector checks are not implemented yet.  We just
         always return zero for them.  */
      if (GET_CODE (cst) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
          if (REAL_VALUES_EQUAL (d, dconst0))
            return 0;
        }
      else
        return 0;
    }

  return gcse_constant_p (cst);
}
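/* For illustration only: with IEEE signed zeros, "if (x == 0.0)" being
   true does not let us rewrite x as 0.0 on the taken branch, because x
   may be -0.0: the comparison -0.0 == 0.0 holds, yet the two values are
   distinguishable (e.g. by the sign of 1.0 / x).  This is why
   implicit_set_cond_p above rejects a CONST_DOUBLE equal to dconst0
   whenever HONOR_SIGNED_ZEROS is in effect for the mode.  */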
/* Find the implicit sets of a function.  An "implicit set" is a constraint
   on the value of a variable, implied by a conditional jump.  For example,
   following "if (x == 2)", the then branch may be optimized as though the
   conditional performed an "explicit set", in this example, "x = 2".  This
   function records the set patterns that are implicit at the start of each
   basic block.

   FIXME: This would be more effective if critical edges are pre-split.  As
          it is now, we can't record implicit sets for blocks that have
          critical successor edges.  This results in missed optimizations
          and in more (unnecessary) work in cfgcleanup.c:thread_jump().  */

static void
find_implicit_sets (void)
{
  basic_block bb, dest;
  int count = 0;
  rtx cond, new_rtx;

  FOR_EACH_BB (bb)
    /* Check for more than one successor.  */
    if (EDGE_COUNT (bb->succs) > 1)
      {
        cond = fis_get_condition (BB_END (bb));

        if (cond
            && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
            && REG_P (XEXP (cond, 0))
            && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
            && implicit_set_cond_p (cond))
          {
            dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
                                         : FALLTHRU_EDGE (bb)->dest;

            if (dest
                /* Record nothing for a critical edge.  */
                && single_pred_p (dest)
                && dest != EXIT_BLOCK_PTR)
              {
                new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
                                       XEXP (cond, 1));
                implicit_sets[dest->index] = new_rtx;
                if (dump_file)
                  {
                    fprintf (dump_file, "Implicit set of reg %d in ",
                             REGNO (XEXP (cond, 0)));
                    fprintf (dump_file, "basic block %d\n", dest->index);
                  }
                count++;
              }
          }
      }

  if (dump_file)
    fprintf (dump_file, "Found %d implicit sets\n", count);
}

/* Bypass conditional jumps.  */

/* The value of last_basic_block at the beginning of the jump_bypass
   pass.  The use of redirect_edge_and_branch_force may introduce new
   basic blocks, but the data flow analysis is only valid for basic
   block indices less than bypass_last_basic_block.  */

static int bypass_last_basic_block;

/* Find a set of REGNO to a constant that is available at the end of basic
   block BB.  Returns NULL if no such set is found.  Based heavily upon
   find_avail_set.  */

static struct expr *
find_bypass_set (int regno, int bb)
{
  struct expr *result = 0;

  for (;;)
    {
      rtx src;
      struct expr *set = lookup_set (regno, &set_hash_table);

      while (set)
        {
          if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
            break;
          set = next_set (regno, set);
        }

      if (set == 0)
        break;

      gcc_assert (GET_CODE (set->expr) == SET);

      src = SET_SRC (set->expr);
      if (gcse_constant_p (src))
        result = set;

      if (! REG_P (src))
        break;

      regno = REGNO (src);
    }
  return result;
}

/* Subroutine of bypass_block that checks whether a pseudo is killed by
   any of the instructions inserted on an edge.  Jump bypassing places
   condition code setters on CFG edges using insert_insn_on_edge.  This
   function is required to check that our data flow analysis is still
   valid prior to commit_edge_insertions.  */

static bool
reg_killed_on_edge (const_rtx reg, const_edge e)
{
  rtx insn;

  for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return true;

  return false;
}
/* Subroutine of bypass_conditional_jumps that attempts to bypass the given
   basic block BB which has more than one predecessor.  If not NULL, SETCC
   is the first instruction of BB, which is immediately followed by JUMP_INSN
   JUMP.  Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
   Returns nonzero if a change was made.

   During the jump bypassing pass, we may place copies of SETCC instructions
   on CFG edges.  The following routine must be careful to pay attention to
   these inserted insns when performing its transformations.  */

static int
bypass_block (basic_block bb, rtx setcc, rtx jump)
{
  rtx insn, note;
  edge e, edest;
  int i, change;
  int may_be_loop_header;
  unsigned removed_p;
  edge_iterator ei;

  insn = (setcc != NULL) ? setcc : jump;

  /* Determine set of register uses in INSN.  */
  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);
  note = find_reg_equal_equiv_note (insn);
  if (note)
    find_used_regs (&XEXP (note, 0), NULL);

  may_be_loop_header = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_DFS_BACK)
      {
        may_be_loop_header = true;
        break;
      }

  change = 0;
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      removed_p = 0;

      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      /* We can't redirect edges from new basic blocks.  */
      if (e->src->index >= bypass_last_basic_block)
        {
          ei_next (&ei);
          continue;
        }

      /* The irreducible loops created by redirecting of edges entering the
         loop from outside would decrease effectiveness of some of the
         following optimizations, so prevent this.  */
      if (may_be_loop_header
          && !(e->flags & EDGE_DFS_BACK))
        {
          ei_next (&ei);
          continue;
        }

      for (i = 0; i < reg_use_count; i++)
        {
          struct reg_use *reg_used = &reg_use_table[i];
          unsigned int regno = REGNO (reg_used->reg_rtx);
          basic_block dest, old_dest;
          struct expr *set;
          rtx src, new_rtx;

          set = find_bypass_set (regno, e->src->index);

          if (! set)
            continue;

          /* Check the data flow is valid after edge insertions.  */
          if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
            continue;

          src = SET_SRC (pc_set (jump));

          if (setcc != NULL)
            src = simplify_replace_rtx (src,
                                        SET_DEST (PATTERN (setcc)),
                                        SET_SRC (PATTERN (setcc)));

          new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
                                          SET_SRC (set->expr));

          /* Jump bypassing may have already placed instructions on
             edges of the CFG.  We can't bypass an outgoing edge that
             has instructions associated with it, as these insns won't
             get executed if the incoming edge is redirected.  */

          if (new_rtx == pc_rtx)
            {
              edest = FALLTHRU_EDGE (bb);
              dest = edest->insns.r ? NULL : edest->dest;
            }
          else if (GET_CODE (new_rtx) == LABEL_REF)
            {
              dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
              /* Don't bypass edges containing instructions.  */
              edest = find_edge (bb, dest);
              if (edest && edest->insns.r)
                dest = NULL;
            }
          else
            dest = NULL;

          /* Avoid unification of the edge with other edges from original
             branch.  We would end up emitting the instruction on "both"
             edges.  */

          if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
              && find_edge (e->src, dest))
            dest = NULL;

          old_dest = e->dest;
          if (dest != NULL
              && dest != old_dest
              && dest != EXIT_BLOCK_PTR)
            {
              redirect_edge_and_branch_force (e, dest);

              /* Copy the register setter to the redirected edge.
                 Don't copy CC0 setters, as CC0 is dead after jump.  */
              if (setcc)
                {
                  rtx pat = PATTERN (setcc);
                  if (!CC0_P (SET_DEST (pat)))
                    insert_insn_on_edge (copy_insn (pat), e);
                }

              if (dump_file != NULL)
                {
                  fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
                                      "in jump_insn %d equals constant ",
                           regno, INSN_UID (jump));
                  print_rtl (dump_file, SET_SRC (set->expr));
                  fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
                           e->src->index, old_dest->index, dest->index);
                }
              change = 1;
              removed_p = 1;
              break;
            }
        }
      if (!removed_p)
        ei_next (&ei);
    }
  return change;
}
/* Find basic blocks with more than one predecessor that only contain a
   single conditional jump.  If the result of the comparison is known at
   compile-time from any incoming edge, redirect that edge to the
   appropriate target.  Returns nonzero if a change was made.

   This function is now mis-named, because we also handle indirect jumps.  */

static int
bypass_conditional_jumps (void)
{
  basic_block bb;
  int changed;
  rtx setcc;
  rtx insn;
  rtx dest;

  /* Note we start at block 1.  */
  if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
    return 0;

  bypass_last_basic_block = last_basic_block;
  mark_dfs_back_edges ();

  changed = 0;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
                  EXIT_BLOCK_PTR, next_bb)
    {
      /* Check for more than one predecessor.  */
      if (!single_pred_p (bb))
        {
          setcc = NULL_RTX;
          FOR_BB_INSNS (bb, insn)
            if (DEBUG_INSN_P (insn))
              continue;
            else if (NONJUMP_INSN_P (insn))
              {
                if (setcc)
                  break;
                if (GET_CODE (PATTERN (insn)) != SET)
                  break;

                dest = SET_DEST (PATTERN (insn));
                if (REG_P (dest) || CC0_P (dest))
                  setcc = insn;
                else
                  break;
              }
            else if (JUMP_P (insn))
              {
                if ((any_condjump_p (insn) || computed_jump_p (insn))
                    && onlyjump_p (insn))
                  changed |= bypass_block (bb, setcc, insn);
                break;
              }
            else if (INSN_P (insn))
              break;
        }
    }

  /* If we bypassed any register setting insns, we inserted a
     copy on the redirected edge.  These need to be committed.  */
  if (changed)
    commit_edge_insertions ();

  return changed;
}
/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */
/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are transparent at the end of the block.
   This is only zero for expressions killed by abnormal critical edges
   created by calls.  */
static sbitmap *transpout;

/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;

/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;

/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;

/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;

/* Contains the edge_list returned by pre_edge_lcm.  */
static struct edge_list *edge_list;

/* Allocate vars used for PRE analysis.  */

static void
alloc_pre_mem (int n_blocks, int n_exprs)
{
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);

  /* pre_insert and pre_delete are allocated later.  */
}

/* Free vars used for PRE analysis.  */

static void
free_pre_mem (void)
{
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */

  if (pre_optimal)
    sbitmap_vector_free (pre_optimal);
  if (pre_redundant)
    sbitmap_vector_free (pre_redundant);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  transp = comp = NULL;
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
}
/* Top level routine to do the dataflow analysis needed by PRE.  */

static void
compute_pre_data (void)
{
  sbitmap trapping_expr;
  basic_block bb;
  unsigned int ui;

  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  sbitmap_vector_zero (ae_kill, last_basic_block);

  /* Collect expressions which might trap.  */
  trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
  sbitmap_zero (trapping_expr);
  for (ui = 0; ui < expr_hash_table.size; ui++)
    {
      struct expr *e;
      for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
        if (may_trap_p (e->expr))
          SET_BIT (trapping_expr, e->bitmap_index);
    }

  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)
  */

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      /* If the current block is the destination of an abnormal edge, we
         kill all trapping expressions because we won't be able to properly
         place the instruction on the edge.  So make them neither
         anticipatable nor transparent.  This is fairly conservative.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (e->flags & EDGE_ABNORMAL)
          {
            sbitmap_difference (antloc[bb->index],
                                antloc[bb->index], trapping_expr);
            sbitmap_difference (transp[bb->index],
                                transp[bb->index], trapping_expr);
            break;
          }

      sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
      sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
    }

  edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
                            ae_kill, &pre_insert_map, &pre_delete_map);
  sbitmap_vector_free (antloc);
  antloc = NULL;
  sbitmap_vector_free (ae_kill);
  ae_kill = NULL;

  sbitmap_free (trapping_expr);
}
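/* For illustration only: the inputs handed to pre_edge_lcm above are
   the classic LCM local properties, per expression E and block B:

     antloc[B][E]  - E is computed in B before any of its operands
                     change (locally anticipatable);
     comp[B][E]    - E is computed in B and still available at B's end;
     transp[B][E]  - nothing in B changes E's operands (transparent);
     ae_kill[B][E] - ~(transp | comp), i.e. B kills E's availability.

   pre_edge_lcm returns pre_insert_map (edges that need a new copy of E)
   and pre_delete_map (blocks whose computation of E becomes redundant).  */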
/* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
   block BB.

   VISITED is a pointer to a working buffer for tracking which BB's have
   been visited.  It is NULL for the top-level call.

   We treat reaching expressions that go through blocks containing the same
   reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
   2 as not reaching.  The intent is to improve the probability of finding
   only one reaching expression and to reduce register lifetimes by picking
   the closest such expression.  */

static int
pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
                              basic_block bb, char *visited)
{
  edge pred;
  edge_iterator ei;

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR
          /* Has this predecessor already been visited?  */
          || visited[pred_bb->index])
        ;/* Nothing to do.  */

      /* Does this predecessor generate this expression?  */
      else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
        {
          /* Is this the occurrence we're looking for?
             Note that there's only one generating occurrence per block
             so we just need to check the block number.  */
          if (occr_bb == pred_bb)
            return 1;

          visited[pred_bb->index] = 1;
        }
      /* Ignore this predecessor if it kills the expression.  */
      else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
        visited[pred_bb->index] = 1;

      /* Neither gen nor kill.  */
      else
        {
          visited[pred_bb->index] = 1;
          if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
            return 1;
        }
    }

  /* All paths have been checked.  */
  return 0;
}

/* The wrapper for pre_expr_reaches_here_work that ensures that any
   memory allocated for that function is returned.  */

static int
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr,
                         basic_block bb)
{
  int rval;
  char *visited = XCNEWVEC (char, last_basic_block);

  rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);

  free (visited);
  return rval;
}
/* Given an expr, generate RTL which we can insert at the end of a BB,
   or on an edge.  Set the block number of any insns generated to
   the value of BB.  */

static rtx
process_insert_insn (struct expr *expr)
{
  rtx reg = expr->reaching_reg;
  rtx exp = copy_rtx (expr->expr);
  rtx pat;

  start_sequence ();

  /* If the expression is something that's an operand, like a constant,
     just copy it to a register.  */
  if (general_operand (exp, GET_MODE (reg)))
    emit_move_insn (reg, exp);

  /* Otherwise, make a new insn to compute this expression and make sure the
     insn will be recognized (this also adds any needed CLOBBERs).  Copy the
     expression to make sure we don't have any sharing issues.  */
  else
    {
      rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));

      if (insn_invalid_p (insn))
        gcc_unreachable ();
    }

  pat = get_insns ();
  end_sequence ();

  return pat;
}
/* Add EXPR to the end of basic block BB.

   This is used by both the PRE and code hoisting.

   For PRE, we want to verify that the expr is either transparent
   or locally anticipatable in the target block.  This check makes
   no sense for code hoisting.  */

static void
insert_insn_end_basic_block (struct expr *expr, basic_block bb, int pre)
{
  rtx insn = BB_END (bb);
  rtx new_insn;
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  rtx pat, pat_end;

  pat = process_insert_insn (expr);
  gcc_assert (pat && INSN_P (pat));

  pat_end = pat;
  while (NEXT_INSN (pat_end) != NULL_RTX)
    pat_end = NEXT_INSN (pat_end);

  /* If the last insn is a jump, insert EXPR in front [taking care to
     handle cc0, etc. properly].  Similarly we need to take care of
     trapping instructions in presence of non-call exceptions.  */

  if (JUMP_P (insn)
      || (NONJUMP_INSN_P (insn)
          && (!single_succ_p (bb)
              || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
    {
#ifdef HAVE_cc0
      rtx note;
#endif

      /* It should always be the case that we can put these instructions
         anywhere in the basic block when performing PRE optimizations.
         Check this.  */
      gcc_assert (!NONJUMP_INSN_P (insn) || !pre
                  || TEST_BIT (antloc[bb->index], expr->bitmap_index)
                  || TEST_BIT (transp[bb->index], expr->bitmap_index));

      /* If this is a jump table, then we can't insert stuff here.  Since
         we know the previous real insn must be the tablejump, we insert
         the new instruction just before the tablejump.  */
      if (GET_CODE (PATTERN (insn)) == ADDR_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
        insn = prev_real_insn (insn);

#ifdef HAVE_cc0
      /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
         if cc0 isn't set.  */
      note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
      if (note)
        insn = XEXP (note, 0);
      else
        {
          rtx maybe_cc0_setter = prev_nonnote_insn (insn);
          if (maybe_cc0_setter
              && INSN_P (maybe_cc0_setter)
              && sets_cc0_p (PATTERN (maybe_cc0_setter)))
            insn = maybe_cc0_setter;
        }
#endif
      /* FIXME: What if something in cc0/jump uses value set in new insn?  */
      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }

  /* Likewise if the last insn is a call, as will happen in the presence
     of exception handling.  */
  else if (CALL_P (insn)
           && (!single_succ_p (bb)
               || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    {
      /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
         we search backward and place the instructions before the first
         parameter is loaded.  Do this for everyone for consistency and a
         presumption that we'll get better code elsewhere as well.

         It should always be the case that we can put these instructions
         anywhere in the basic block when performing PRE optimizations.
         Check this.  */

      gcc_assert (!pre
                  || TEST_BIT (antloc[bb->index], expr->bitmap_index)
                  || TEST_BIT (transp[bb->index], expr->bitmap_index));

      /* Since different machines initialize their parameter registers
         in different orders, assume nothing.  Collect the set of all
         parameter registers.  */
      insn = find_first_parameter_load (insn, BB_HEAD (bb));

      /* If we found all the parameter loads, then we want to insert
         before the first parameter load.

         If we did not find all the parameter loads, then we might have
         stopped on the head of the block, which could be a CODE_LABEL.
         If we inserted before the CODE_LABEL, then we would be putting
         the insn in the wrong basic block.  In that case, put the insn
         after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
      while (LABEL_P (insn)
             || NOTE_INSN_BASIC_BLOCK_P (insn))
        insn = NEXT_INSN (insn);

      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }
  else
    new_insn = emit_insn_after_noloc (pat, insn, bb);

  while (1)
    {
      if (INSN_P (pat))
        add_label_notes (PATTERN (pat), new_insn);
      if (pat == pat_end)
        break;
      pat = NEXT_INSN (pat);
    }

  gcse_create_count++;

  if (dump_file)
    {
      fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
               bb->index, INSN_UID (new_insn));
      fprintf (dump_file, "copying expression %d to reg %d\n",
               expr->bitmap_index, regno);
    }
}
/* Insert partially redundant expressions on edges in the CFG to make
   the expressions fully redundant.  */

static int
pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
{
  int e, i, j, num_edges, set_size, did_insert = 0;
  sbitmap *inserted;

  /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
     if it reaches any of the deleted expressions.  */

  set_size = pre_insert_map[0]->size;
  num_edges = NUM_EDGES (edge_list);
  inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
  sbitmap_vector_zero (inserted, num_edges);

  for (e = 0; e < num_edges; e++)
    {
      int indx;
      basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);

      for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
        {
          SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];

          for (j = indx;
               insert && j < (int) expr_hash_table.n_elems;
               j++, insert >>= 1)
            if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
              {
                struct expr *expr = index_map[j];
                struct occr *occr;

                /* Now look at each deleted occurrence of this expression.  */
                for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
                  {
                    if (! occr->deleted_p)
                      continue;

                    /* Insert this expression on this edge if it would
                       reach the deleted occurrence in BB.  */
                    if (!TEST_BIT (inserted[e], j))
                      {
                        rtx insn;
                        edge eg = INDEX_EDGE (edge_list, e);

                        /* We can't insert anything on an abnormal and
                           critical edge, so we insert the insn at the end of
                           the previous block.  There are several alternatives
                           detailed in Morgans book P277 (sec 10.5) for
                           handling this situation.  This one is easiest for
                           now.  */

                        if (eg->flags & EDGE_ABNORMAL)
                          insert_insn_end_basic_block (index_map[j], bb, 0);
                        else
                          {
                            insn = process_insert_insn (index_map[j]);
                            insert_insn_on_edge (insn, eg);
                          }

                        if (dump_file)
                          {
                            fprintf (dump_file, "PRE: edge (%d,%d), ",
                                     bb->index,
                                     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
                            fprintf (dump_file, "copy expression %d\n",
                                     expr->bitmap_index);
                          }

                        update_ld_motion_stores (expr);
                        SET_BIT (inserted[e], j);
                        did_insert = 1;
                        gcse_create_count++;
                      }
                  }
              }
        }
    }

  sbitmap_vector_free (inserted);
  return did_insert;
}
/* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
   Given "old_reg <- expr" (INSN), instead of adding after it
     reaching_reg <- old_reg
   it's better to do the following:
     reaching_reg <- expr
     old_reg      <- reaching_reg
   because this way copy propagation can discover additional PRE
   opportunities.  But if this fails, we try the old way.
   When "expr" is a store, i.e.
   given "MEM <- old_reg", instead of adding after it
     reaching_reg <- old_reg
   it's better to add it before as follows:
     reaching_reg <- old_reg
     MEM          <- reaching_reg.  */

static void
pre_insert_copy_insn (struct expr *expr, rtx insn)
{
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  int indx = expr->bitmap_index;
  rtx pat = PATTERN (insn);
  rtx set, first_set, new_insn;
  rtx old_reg;
  int i;

  /* This block matches the logic in hash_scan_insn.  */
  switch (GET_CODE (pat))
    {
    case SET:
      set = pat;
      break;

    case PARALLEL:
      /* Search through the parallel looking for the set whose
         source was the expression that we're interested in.  */
      first_set = NULL_RTX;
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              /* If the source was a REG_EQUAL or REG_EQUIV note, we
                 may not find an equivalent expression, but in this
                 case the PARALLEL will have a single set.  */
              if (first_set == NULL_RTX)
                first_set = x;
              if (expr_equiv_p (SET_SRC (x), expr->expr))
                {
                  set = x;
                  break;
                }
            }
        }

      gcc_assert (first_set);
      if (set == NULL_RTX)
        set = first_set;
      break;

    default:
      gcc_unreachable ();
    }

  if (REG_P (SET_DEST (set)))
    {
      old_reg = SET_DEST (set);
      /* Check if we can modify the set destination in the original insn.  */
      if (validate_change (insn, &SET_DEST (set), reg, 0))
        {
          new_insn = gen_move_insn (old_reg, reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
      else
        {
          new_insn = gen_move_insn (reg, old_reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
    }
  else /* This is possible only in case of a store to memory.  */
    {
      old_reg = SET_SRC (set);
      new_insn = gen_move_insn (reg, old_reg);

      /* Check if we can modify the set source in the original insn.  */
      if (validate_change (insn, &SET_SRC (set), reg, 0))
        new_insn = emit_insn_before (new_insn, insn);
      else
        new_insn = emit_insn_after (new_insn, insn);
    }

  gcse_create_count++;

  if (dump_file)
    fprintf (dump_file,
             "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
             BLOCK_NUM (insn), INSN_UID (new_insn), indx,
             INSN_UID (insn), regno);
}
/* Copy available expressions that reach the redundant expression
   to `reaching_reg'.  */

static void
pre_insert_copies (void)
{
  unsigned int i, added_copy;
  struct expr *expr;
  struct occr *occr;
  struct occr *avail;

  /* For each available expression in the table, copy the result to
     `reaching_reg' if the expression reaches a deleted one.

     ??? The current algorithm is rather brute force.
     Need to do some profiling.  */

  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL;
         expr = expr->next_same_hash)
      {
        /* If the basic block isn't reachable, PPOUT will be TRUE.  However,
           we don't want to insert a copy here because the expression may not
           really be redundant.  So only insert an insn if the expression was
           deleted.  This test also avoids further processing if the
           expression wasn't deleted anywhere.  */
        if (expr->reaching_reg == NULL)
          continue;

        /* Set when we add a copy for that expression.  */
        added_copy = 0;

        for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
          {
            if (! occr->deleted_p)
              continue;

            for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
              {
                rtx insn = avail->insn;

                /* No need to handle this one if handled already.  */
                if (avail->copied_p)
                  continue;

                /* Don't handle this one if it's a redundant one.  */
                if (INSN_DELETED_P (insn))
                  continue;

                /* Or if the expression doesn't reach the deleted one.  */
                if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
                                               expr,
                                               BLOCK_FOR_INSN (occr->insn)))
                  continue;

                added_copy = 1;

                /* Copy the result of avail to reaching_reg.  */
                pre_insert_copy_insn (expr, insn);
                avail->copied_p = 1;
              }
          }

        if (added_copy)
          update_ld_motion_stores (expr);
      }
}
/* Emit move from SRC to DEST noting the equivalence with expression computed
   in INSN.  */

static rtx
gcse_emit_move_after (rtx src, rtx dest, rtx insn)
{
  rtx new_rtx;
  rtx set = single_set (insn), set2;
  rtx note;
  rtx eqv;

  /* This should never fail since we're creating a reg->reg copy
     we've verified to be valid.  */

  new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);

  /* Note the equivalence for local CSE pass.  */
  set2 = single_set (new_rtx);
  if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
    return new_rtx;
  if ((note = find_reg_equal_equiv_note (insn)))
    eqv = XEXP (note, 0);
  else
    eqv = SET_SRC (set);

  set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));

  return new_rtx;
}
/* Delete redundant computations.
   Deletion is done by changing the insn to copy the `reaching_reg' of
   the expression into the result of the SET.  It is left to later passes
   (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.

   Returns nonzero if a change is made.  */

static int
pre_delete (void)
{
  unsigned int i;
  int changed;
  struct expr *expr;
  struct occr *occr;

  changed = 0;
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i];
         expr != NULL;
         expr = expr->next_same_hash)
      {
        int indx = expr->bitmap_index;

        /* We only need to search antic_occr since we require
           ANTLOC != 0.  */

        for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
          {
            rtx insn = occr->insn;
            rtx set;
            basic_block bb = BLOCK_FOR_INSN (insn);

            /* We only delete insns that have a single_set.  */
            if (TEST_BIT (pre_delete_map[bb->index], indx)
                && (set = single_set (insn)) != 0
                && dbg_cnt (pre_insn))
              {
                /* Create a pseudo-reg to store the result of reaching
                   expressions into.  Get the mode for the new pseudo from
                   the mode of the original destination pseudo.  */
                if (expr->reaching_reg == NULL)
                  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));

                gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
                delete_insn (insn);
                occr->deleted_p = 1;
                changed = 1;
                gcse_subst_count++;

                if (dump_file)
                  {
                    fprintf (dump_file,
                             "PRE: redundant insn %d (expression %d) in ",
                             INSN_UID (insn), indx);
                    fprintf (dump_file, "bb %d, reaching reg is %d\n",
                             bb->index, REGNO (expr->reaching_reg));
                  }
              }
          }
      }

  return changed;
}
/* Perform GCSE optimizations using PRE.
   This is called by one_pre_gcse_pass after all the dataflow analysis
   has been done.

   This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
   lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
   Compiler Design and Implementation.

   ??? A new pseudo reg is created to hold the reaching expression.  The nice
   thing about the classical approach is that it would try to use an existing
   reg.  If the register can't be adequately optimized [i.e. we introduce
   reload problems], one could add a pass here to propagate the new register
   through the block.

   ??? We don't handle single sets in PARALLELs because we're [currently] not
   able to copy the rest of the parallel when we insert copies to create full
   redundancies from partial redundancies.  However, there's no reason why we
   can't handle PARALLELs in the cases where there are no partial
   redundancies.  */

static int
pre_gcse (void)
{
  unsigned int i;
  int did_insert, changed;
  struct expr **index_map;
  struct expr *expr;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL;
         expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Delete the redundant insns first so that
     - we know what register to use for the new insns and for the other
       ones with reaching expressions
     - we know which insns are redundant when we go to create copies  */

  changed = pre_delete ();
  did_insert = pre_edge_insert (edge_list, index_map);

  /* In other places with reaching expressions, copy the expression to the
     specially allocated pseudo-reg that reaches the redundant expr.  */
  pre_insert_copies ();
  if (did_insert)
    {
      commit_edge_insertions ();
      changed = 1;
    }

  free (index_map);
  return changed;
}
/* Top level routine to perform one PRE GCSE pass.

   Return nonzero if a change was made.  */

static int
one_pre_gcse_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("PRE disabled")))
    return 0;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table, 0);
  add_noreturn_fake_exit_edges ();
  if (flag_gcse_lm)
    compute_ld_motion_mems ();

  compute_hash_table (&expr_hash_table);
  trim_ld_motion_mems ();
  if (dump_file)
    dump_hash_table (dump_file, "Expression", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
      compute_pre_data ();
      changed |= pre_gcse ();
      free_edge_list (edge_list);
      free_pre_mem ();
    }

  free_ldst_mems ();
  remove_fake_exit_edges ();
  free_hash_table (&expr_hash_table);

  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  return changed;
}
/* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
   to INSN.  If such notes are added to an insn which references a
   CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
   that note, because the following loop optimization pass requires
   them.  */

/* ??? If there was a jump optimization pass after gcse and before loop,
   then we would not need to do this here, because jump would add the
   necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */

static void
add_label_notes (rtx x, rtx insn)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
         avoid flow generating (slightly) worse code.

         We no longer ignore such label references (see LABEL_REF handling in
         mark_jump_label for additional information).  */

      /* There's no reason for current users to emit jump-insns with
         such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
         notes.  */
      gcc_assert (!JUMP_P (insn));
      add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));

      if (LABEL_P (XEXP (x, 0)))
        LABEL_NUSES (XEXP (x, 0))++;

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insn);
    }
}
/* Compute transparent outgoing information for each block.

   An expression is transparent to an edge unless it is killed by
   the edge itself.  This can only happen with abnormal control flow,
   when the edge is traversed through a call.  This happens with
   non-local labels and exceptions.

   This would not be necessary if we split the edge.  While this is
   normally impossible for abnormal critical edges, with some effort
   it should be possible with exception handling, since we still have
   control over which handler should be invoked.  But due to increased
   EH table sizes, this may not be worthwhile.  */

static void
compute_transpout (void)
{
  basic_block bb;
  unsigned int i;
  struct expr *expr;

  sbitmap_vector_ones (transpout, last_basic_block);

  FOR_EACH_BB (bb)
    {
      /* Note that flow inserted a nop at the end of basic blocks that
         end in call instructions for reasons other than abnormal
         control flow.  */
      if (! CALL_P (BB_END (bb)))
        continue;

      for (i = 0; i < expr_hash_table.size; i++)
        for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
          if (MEM_P (expr->expr))
            {
              if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
                  && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
                continue;

              /* ??? Optimally, we would use interprocedural alias
                 analysis to determine if this mem is actually killed
                 by this call.  */
              RESET_BIT (transpout[bb->index], expr->bitmap_index);
            }
    }
}
/* Code Hoisting variables and subroutines.  */

/* Very busy expressions.  */
static sbitmap *hoist_vbein;
static sbitmap *hoist_vbeout;

/* Hoistable expressions.  */
static sbitmap *hoist_exprs;

/* ??? We could compute post dominators and run this algorithm in
   reverse to perform tail merging, doing so would probably be
   more effective than the tail merging code in jump.c.

   It's unclear if tail merging could be run in parallel with
   code hoisting.  It would be nice.  */

/* Allocate vars used for code hoisting analysis.  */

static void
alloc_code_hoist_mem (int n_blocks, int n_exprs)
{
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);

  hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
  hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
  hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
  transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
}

/* Free vars used for code hoisting analysis.  */

static void
free_code_hoist_mem (void)
{
  sbitmap_vector_free (antloc);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  sbitmap_vector_free (hoist_vbein);
  sbitmap_vector_free (hoist_vbeout);
  sbitmap_vector_free (hoist_exprs);
  sbitmap_vector_free (transpout);

  free_dominance_info (CDI_DOMINATORS);
}
/* Compute the very busy expressions at entry/exit from each block.

   An expression is very busy if all paths from a given point
   compute the expression.  */

static void
compute_code_hoist_vbeinout (void)
{
  int changed, passes;
  basic_block bb;

  sbitmap_vector_zero (hoist_vbeout, last_basic_block);
  sbitmap_vector_zero (hoist_vbein, last_basic_block);

  passes = 0;
  changed = 1;

  while (changed)
    {
      changed = 0;

      /* We scan the blocks in the reverse order to speed up
         the convergence.  */
      FOR_EACH_BB_REVERSE (bb)
        {
          if (bb->next_bb != EXIT_BLOCK_PTR)
            sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
                                           hoist_vbein, bb->index);

          changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
                                              antloc[bb->index],
                                              hoist_vbeout[bb->index],
                                              transp[bb->index]);
        }

      passes++;
    }

  if (dump_file)
    fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
}
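/* For illustration only: the fixed point computed above solves, per
   block B and expression E,

     vbeout[B] = intersection of vbein[S] over all successors S of B
     vbein[B]  = antloc[B] | (vbeout[B] & transp[B])

   i.e. E is very busy on entry to B if B itself computes E before
   changing E's operands, or if every path leaving B computes E and B
   does not modify E's operands.  */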
/* Top level routine to do the dataflow analysis needed by code hoisting.  */

static void
compute_code_hoist_data (void)
{
  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  compute_transpout ();
  compute_code_hoist_vbeinout ();
  calculate_dominance_info (CDI_DOMINATORS);
  if (dump_file)
    fprintf (dump_file, "\n");
}
/* Determine if the expression identified by EXPR_INDEX would
   reach BB unimpared if it was placed at the end of EXPR_BB.

   It's unclear exactly what Muchnick meant by "unimpared".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   paths.  */

static int
hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index,
                           basic_block bb, char *visited)
{
  edge pred;
  edge_iterator ei;
  int visited_allocated_locally = 0;

  if (visited == NULL)
    {
      visited_allocated_locally = 1;
      visited = XCNEWVEC (char, last_basic_block);
    }

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR)
        break;
      else if (pred_bb == expr_bb)
        continue;
      else if (visited[pred_bb->index])
        continue;

      /* Does this predecessor generate this expression?  */
      else if (TEST_BIT (comp[pred_bb->index], expr_index))
        break;
      else if (! TEST_BIT (transp[pred_bb->index], expr_index))
        break;

      /* Not killed.  */
      else
        {
          visited[pred_bb->index] = 1;
          if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
                                           pred_bb, visited))
            break;
        }
    }
  if (visited_allocated_locally)
    free (visited);

  return (pred == NULL);
}
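
/* For illustration (block names are hypothetical): suppose EXPR_BB
   reaches BB along two paths,

       EXPR_BB -> B1 -> BB   and   EXPR_BB -> B2 -> BB,

   and the expression is transparent in B1 but has an operand modified
   in B2.  The walk above reaches B2, finds the expression neither
   computed nor transparent there, and returns zero: a value computed
   at the end of EXPR_BB would be stale when BB is entered via B2.
   This is the *all* paths requirement described above; PRE's *any*
   path notion would have accepted the B1 path alone.  */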
/* Actually perform code hoisting.  */

static int
hoist_code (void)
{
  basic_block bb, dominated;
  VEC (basic_block, heap) *domby;
  unsigned int i, j;
  struct expr **index_map;
  struct expr *expr;
  int changed = 0;

  sbitmap_vector_zero (hoist_exprs, last_basic_block);

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Walk over each basic block looking for potentially hoistable
     expressions, nothing gets hoisted from the entry block.  */
  FOR_EACH_BB (bb)
    {
      int found = 0;
      int insn_inserted_p;

      domby = get_dominated_by (CDI_DOMINATORS, bb);
      /* Examine each expression that is very busy at the exit of this
         block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
        {
          int hoistable = 0;

          if (TEST_BIT (hoist_vbeout[bb->index], i)
              && TEST_BIT (transpout[bb->index], i))
            {
              /* We've found a potentially hoistable expression, now
                 we look at every block BB dominates to see if it
                 computes the expression.  */
              for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
                {
                  /* Ignore self dominance.  */
                  if (bb == dominated)
                    continue;
                  /* We've found a dominated block, now see if it computes
                     the busy expression and whether or not moving that
                     expression to the "beginning" of that block is safe.  */
                  if (! TEST_BIT (antloc[dominated->index], i))
                    continue;

                  /* Note if the expression would reach the dominated block
                     unimpared if it was placed at the end of BB.

                     Keep track of how many times this expression is hoistable
                     from a dominated block into BB.  */
                  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
                    hoistable++;
                }

              /* If we found more than one hoistable occurrence of this
                 expression, then note it in the bitmap of expressions to
                 hoist.  It makes no sense to hoist things which are computed
                 in only one BB, and doing so tends to pessimize register
                 allocation.  One could increase this value to try harder
                 to avoid any possible code expansion due to register
                 allocation issues; however experiments have shown that
                 the vast majority of hoistable expressions are only movable
                 from two successors, so raising this threshold is likely
                 to nullify any benefit we get from code hoisting.  */
              if (hoistable > 1)
                {
                  SET_BIT (hoist_exprs[bb->index], i);
                  found = 1;
                }
            }
        }
      /* If we found nothing to hoist, then quit now.  */
      if (! found)
        {
          VEC_free (basic_block, heap, domby);
          continue;
        }

      /* Loop over all the hoistable expressions.  */
      for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
        {
          /* We want to insert the expression into BB only once, so
             note when we've inserted it.  */
          insn_inserted_p = 0;

          /* These tests should be the same as the tests above.  */
          if (TEST_BIT (hoist_exprs[bb->index], i))
            {
              /* We've found a potentially hoistable expression, now
                 we look at every block BB dominates to see if it
                 computes the expression.  */
              for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
                {
                  /* Ignore self dominance.  */
                  if (bb == dominated)
                    continue;

                  /* We've found a dominated block, now see if it computes
                     the busy expression and whether or not moving that
                     expression to the "beginning" of that block is safe.  */
                  if (! TEST_BIT (antloc[dominated->index], i))
                    continue;

                  /* The expression is computed in the dominated block and
                     it would be safe to compute it at the start of the
                     dominated block.  Now we have to determine if the
                     expression would reach the dominated block if it was
                     placed at the end of BB.  */
                  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
                    {
                      struct expr *expr = index_map[i];
                      struct occr *occr = expr->antic_occr;
                      rtx insn;
                      rtx set;

                      /* Find the right occurrence of this expression.
                         Test OCCR before dereferencing it, so that we
                         do not read through a null pointer if no
                         occurrence is found.  */
                      while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
                        occr = occr->next;

                      gcc_assert (occr);
                      insn = occr->insn;
                      set = single_set (insn);
                      gcc_assert (set);

                      /* Create a pseudo-reg to store the result of reaching
                         expressions into.  Get the mode for the new pseudo
                         from the mode of the original destination pseudo.  */
                      if (expr->reaching_reg == NULL)
                        expr->reaching_reg
                          = gen_reg_rtx_and_attrs (SET_DEST (set));

                      gcse_emit_move_after (expr->reaching_reg,
                                            SET_DEST (set), insn);
                      delete_insn (insn);
                      occr->deleted_p = 1;
                      changed = 1;
                      gcse_subst_count++;

                      if (! insn_inserted_p)
                        {
                          insert_insn_end_basic_block (index_map[i], bb, 0);
                          insn_inserted_p = 1;
                        }
                    }
                }
            }
        }
      VEC_free (basic_block, heap, domby);
    }

  free (index_map);

  return changed;
}
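
/* As a source-level sketch of the transformation performed above
   (names are illustrative only): given

       if (cond)
         x = a + b;
       else
         y = a + b;

   "a + b" is very busy at the exit of the dominating block and is
   anticipatable in both dominated blocks, so it is computed there
   once into a new pseudo T, and the two occurrences become x = t
   and y = t.  */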
/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table, 0);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  return changed;
}
/* Here we provide the things required to do store motion towards
   the exit.  In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to
   itself.

            int i;
            float a[10];

            void foo(float scale)
            {
              for (i=0; i<10; i++)
                a[i] *= scale;
            }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

     The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge-based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

     Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
static hashval_t
pre_ldst_expr_hash (const void *p)
{
  int do_not_record_p = 0;
  const struct ls_expr *const x = (const struct ls_expr *) p;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int
pre_ldst_expr_eq (const void *p1, const void *p2)
{
  const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
    *const ptr2 = (const struct ls_expr *) p2;
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}
/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  void **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
                   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
  if (*slot)
    return (struct ls_expr *) *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next         = pre_ldst_mems;
  ptr->expr         = NULL;
  ptr->pattern      = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads        = NULL_RTX;
  ptr->stores       = NULL_RTX;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid      = 0;
  ptr->index        = 0;
  ptr->hash_index   = hash;
  pre_ldst_mems     = ptr;
  *slot = ptr;

  return ptr;
}
/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}
/* Free up all memory associated with the ldst list.  */

static void
free_ldst_mems (void)
{
  if (pre_ldst_table)
    htab_delete (pre_ldst_table);
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}
/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n         Loads : ");

      if (ptr->loads)
        print_rtl (file, ptr->loads);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n        Stores : ");

      if (ptr->stores)
        print_rtl (file, ptr->stores);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}
/* Return the entry for X if it is in the list of ldst only expressions
   and has not been invalidated, or NULL otherwise.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  void **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
  if (!slot || ((struct ls_expr *)*slot)->invalid)
    return NULL;
  return (struct ls_expr *) *slot;
}
/* Return first item in the list.  */

static inline struct ls_expr *
first_ls_expr (void)
{
  return pre_ldst_mems;
}

/* Return the next item in the list after the specified one.  */

static inline struct ls_expr *
next_ls_expr (struct ls_expr * ptr)
{
  return ptr->next;
}
/* Load Motion for loads which only kill themselves.  */

/* Return true if x is a simple MEM operation, with no registers or
   side effects.  These are the types of loads we consider for the
   ld_motion list, otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (! MEM_P (x))
    return 0;

  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (flag_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
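
/* For instance (modes chosen for illustration only),
   (mem:SI (symbol_ref:SI ("i"))) is a simple MEM and becomes a
   candidate for the ld_motion list, while a volatile MEM, a BLKmode
   MEM, or a MEM whose address mentions the stack pointer is rejected
   above and left to the usual aliasing machinery.  */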
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias itself.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
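
/* A "buried" reference here is a MEM nested inside a larger
   expression rather than standing alone as the SET_SRC or SET_DEST.
   For example (illustrative RTL only), in

       (set (reg:SI 100) (plus:SI (mem:SI (symbol_ref:SI ("i")))
                                  (const_int 1)))

   the load of 'i' is buried inside the PLUS, so the walk above finds
   it and marks its ldst entry invalid rather than letting load motion
   treat it as a simple load.  */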
/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
                                pre_ldst_expr_eq, NULL);

  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (NONDEBUG_INSN_P (insn))
            {
              if (GET_CODE (PATTERN (insn)) == SET)
                {
                  rtx src = SET_SRC (PATTERN (insn));
                  rtx dest = SET_DEST (PATTERN (insn));

                  /* Check for a simple LOAD...  */
                  if (MEM_P (src) && simple_mem (src))
                    {
                      ptr = ldst_entry (src);
                      if (REG_P (dest))
                        ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
                      else
                        ptr->invalid = 1;
                    }
                  else
                    {
                      /* Make sure there isn't a buried load somewhere.  */
                      invalidate_any_buried_refs (src);
                    }

                  /* Check for stores.  Don't worry about aliased ones, they
                     will block any movement we might do later.  We only care
                     about this exact pattern since those are the only
                     circumstance that we will ignore the aliasing info.  */
                  if (MEM_P (dest) && simple_mem (dest))
                    {
                      ptr = ldst_entry (dest);

                      if (! MEM_P (src)
                          && GET_CODE (src) != ASM_OPERANDS
                          /* Check for REG manually since want_to_gcse_p
                             returns 0 for all REGs.  */
                          && can_assign_to_reg_without_clobbers_p (src))
                        ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
                      else
                        ptr->invalid = 1;
                    }
                }
              else
                invalidate_any_buried_refs (PATTERN (insn));
            }
        }
    }
}
/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
        {
          /* Delete if we cannot find this mem in the expression list.  */
          unsigned int hash = ptr->hash_index % expr_hash_table.size;

          for (expr = expr_hash_table.table[hash];
               expr != NULL;
               expr = expr->next_same_hash)
            if (expr_equiv_p (expr->expr, ptr->pattern))
              break;
        }
      else
        expr = (struct expr *) 0;

      if (expr)
        {
          /* Set the expression field if we are keeping it.  */
          ptr->expr = expr;
          last = & ptr->next;
          ptr = ptr->next;
        }
      else
        {
          *last = ptr->next;
          htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
          free_ldst_entry (ptr);
          ptr = * last;
        }
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}
/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */

static void
update_ld_motion_stores (struct expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We could try to find just the REACHED stores, but it shouldn't
         matter to set the reaching reg everywhere...  some might be
         dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
         where reg is the reaching reg used in the load.  We checked in
         compute_ld_motion_mems that we can replace (set mem expr) with
         (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
        {
          rtx insn = XEXP (list, 0);
          rtx pat = PATTERN (insn);
          rtx src = SET_SRC (pat);
          rtx reg = expr->reaching_reg;
          rtx copy, new_rtx;

          /* If we've already copied it, continue.  */
          if (expr->reaching_reg == src)
            continue;

          if (dump_file)
            {
              fprintf (dump_file, "PRE:  store updated with reaching reg ");
              print_rtl (dump_file, expr->reaching_reg);
              fprintf (dump_file, ":\n  ");
              print_inline_rtx (dump_file, insn, 8);
              fprintf (dump_file, "\n");
            }

          copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
          new_rtx = emit_insn_before (copy, insn);
          SET_SRC (pat) = reg;
          df_insn_rescan (insn);

          /* Un-recognize this pattern since it's probably different now.  */
          INSN_CODE (insn) = -1;
          gcse_create_count++;
        }
    }
}
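
/* Concretely, for a store insn such as (register numbers are
   illustrative only)

       (set (mem:SI (symbol_ref:SI ("i"))) (plus:SI (reg:SI 60)
                                                    (const_int 1)))

   with reaching register (reg:SI 70), the loop above emits

       (set (reg:SI 70) (plus:SI (reg:SI 60) (const_int 1)))

   just before the store and rewrites the store itself as

       (set (mem:SI (symbol_ref:SI ("i"))) (reg:SI 70))

   so subsequent loads of 'i' can use (reg:SI 70) directly.  */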
/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple switch statements.  Rather than simply
     threshold the number of blocks, use something with a more
     graceful degradation.  */
  if (n_edges > 20000 + n_basic_blocks * 4)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d edges/basic block",
               pass, n_basic_blocks, n_edges / n_basic_blocks);

      return true;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers",
               pass, n_basic_blocks, max_reg_num ());

      return true;
    }

  return false;
}
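
/* As a worked example of the first threshold: a function with 1000
   basic blocks is allowed up to 20000 + 1000 * 4 = 24000 edges, i.e.
   an average of 24 edges per block, before the pass is disabled; a
   typical cfg averages only about two.  */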
/* Main function for the CPROP pass.  */

static int
one_cprop_pass (void)
{
  int changed = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("const/copy propagation disabled")))
    return 0;

  global_const_prop_count = local_const_prop_count = 0;
  global_copy_prop_count = local_copy_prop_count = 0;

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  /* Do a local const/copy propagation pass first.  The global pass
     only handles global opportunities.
     If the local pass changes something, remove any unreachable blocks
     because the CPROP global dataflow analysis may get into infinite
     loops for CFGs with unreachable blocks.

     FIXME: This local pass should not be necessary after CSE (but for
            some reason it still is).  It is also (proven) not necessary
            to run the local pass right after fwprop.

     FIXME: The global analysis would not get into infinite loops if it
            would use the DF solver (via df_simple_dataflow) instead of
            the solver implemented in this file.  */
  if (local_cprop_pass ())
    {
      delete_unreachable_blocks ();
      df_analyze ();
    }

  /* Determine implicit sets.  */
  implicit_sets = XCNEWVEC (rtx, last_basic_block);
  find_implicit_sets ();

  alloc_hash_table (&set_hash_table, 1);
  compute_hash_table (&set_hash_table);

  /* Free implicit_sets before peak usage.  */
  free (implicit_sets);
  implicit_sets = NULL;

  if (dump_file)
    dump_hash_table (dump_file, "SET", &set_hash_table);
  if (set_hash_table.n_elems > 0)
    {
      basic_block bb;
      rtx insn;

      alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
      compute_cprop_data ();

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
                      EXIT_BLOCK_PTR, next_bb)
        {
          /* Reset tables used to keep track of what's still valid [since
             the start of the block].  */
          reset_opr_set_tables ();

          FOR_BB_INSNS (bb, insn)
            if (INSN_P (insn))
              {
                changed |= cprop_insn (insn);

                /* Keep track of everything modified by this insn.  */
                /* ??? Need to be careful w.r.t. mods done to INSN.
                       Don't call mark_oprs_set if we turned the
                       insn into a NOTE.  */
                if (! NOTE_P (insn))
                  mark_oprs_set (insn);
              }
        }

      changed |= bypass_conditional_jumps ();

      free_cprop_mem ();
    }

  free_hash_table (&set_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  if (dump_file)
    {
      fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d local const props, %d local copy props, ",
               local_const_prop_count, local_copy_prop_count);
      fprintf (dump_file, "%d global const props, %d global copy props\n\n",
               global_const_prop_count, global_copy_prop_count);
    }

  return changed;
}
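
/* As a source-level sketch of what this pass does (variable names are
   illustrative only): given

       x = 3;
       y = x;
       z = y + 1;

   copy propagation rewrites the last statement to use x directly and
   constant propagation then substitutes the known value, yielding
   z = 3 + 1, which later folding reduces to z = 4.  */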
/* All the passes implemented in this file.  Each pass has its
   own gate and execute function, and at the end of the file a
   pass definition for passes.c.

   We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

static bool
gate_rtl_cprop (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && dbg_cnt (cprop);
}

static unsigned int
execute_rtl_cprop (void)
{
  delete_unreachable_blocks ();
  df_note_add_problem ();
  df_set_flags (DF_LR_RUN_DCE);
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_cprop_pass ();
  return 0;
}

static bool
gate_rtl_pre (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && optimize_function_for_speed_p (cfun)
    && dbg_cnt (pre);
}

static unsigned int
execute_rtl_pre (void)
{
  delete_unreachable_blocks ();
  df_note_add_problem ();
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
  return 0;
}

static bool
gate_rtl_hoist (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

static unsigned int
execute_rtl_hoist (void)
{
  delete_unreachable_blocks ();
  df_note_add_problem ();
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
  return 0;
}
struct rtl_opt_pass pass_rtl_cprop =
{
 {
  RTL_PASS,
  "cprop",                              /* name */
  gate_rtl_cprop,                       /* gate */
  execute_rtl_cprop,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CPROP,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_pre =
{
 {
  RTL_PASS,
  "rtl pre",                            /* name */
  gate_rtl_pre,                         /* gate */
  execute_rtl_pre,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_PRE,                               /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_hoist =
{
 {
  RTL_PASS,
  "hoist",                              /* name */
  gate_rtl_hoist,                       /* gate */
  execute_rtl_hoist,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_HOIST,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

#include "gt-gcse.h"