/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   communications of the acm, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an assignment.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes is even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
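/* Illustrative example (added commentary, not from the original sources):
   at the source level, PRE transforms

       if (cond)                      if (cond)
         x = a + b;                     { t = a + b; x = t; }
       else                           else
         x = 0;                         { x = 0; t = a + b; }
       y = a + b;                     y = t;

   The computation of a + b in the last statement was redundant only along
   the then-path; inserting it on the else-path makes it fully redundant,
   after which it is deleted and replaced by a copy from the new pseudo t.  */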
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;

  /* Index in the available expression bitmaps.  */
  int bitmap_index;

  /* Next entry with the same hash.  */
  struct expr *next_same_hash;

  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;

  /* List of available occurrence in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;

  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;

  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;

  /* The insn that computes the expression.  */
  rtx insn;

  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;

  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;
DEF_VEC_P (occr_t);
DEF_VEC_ALLOC_P (occr_t, heap);
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether the table is expression or copy propagation one.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table_d set_hash_table;
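/* Illustrative sketch (added commentary, not part of the pass): the tables
   above are plain bucket arrays with separate chaining, probed with
   hash % size.  A minimal standalone model of that layout, using a
   hypothetical element type "node":  */
#if 0
struct node
{
  unsigned key;			/* Stands in for the expression hash.  */
  struct node *next_same_hash;	/* Chain link, as in struct expr.  */
};

struct table_model
{
  struct node **buckets;	/* Array of SIZE chain heads.  */
  unsigned size;		/* Number of buckets.  */
};

/* Insert N into its chain; the real insert_expr_in_table does the same
   kind of probe but appends at the chain tail instead of the head.  */
static void
model_insert (struct table_model *t, struct node *n)
{
  unsigned h = n->key % t->size;
  n->next_same_hash = t->buckets[h];
  t->buckets[h] = n;
}
#endif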
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself. (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.

   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};
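/* Illustrative example (added commentary, not from the original sources):
   a loop such as

       for (i = 0; i < n; i++)
         sum += p->val;		/* p->val loaded every iteration.  */

   qualifies for load motion when nothing in the loop stores to *p except
   (possibly) stores of the same reaching register, so the load can be
   hoisted out of the loop and re-used.  */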
/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs */
static sbitmap *ae_kill;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *, int);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static struct expr *lookup_set (unsigned int, struct hash_table_d *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *,
				      int, int *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);
#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
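/* Usage sketch (added commentary): these macros wrap the byte-counting
   allocators defined below, so callers get typed storage while bytes_used
   stays up to date, e.g.

     struct expr *e = GOBNEW (struct expr);		(obstack, counted)
     rtx *vec = GCNEWVEC (rtx, last_basic_block);	(zeroed heap vector)

   matching the calls made in insert_expr_in_table and alloc_gcse_mem.  */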
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)
/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}
/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
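/* Usage note (added commentary): hash_scan_set below guards every GCSE
   candidate with this predicate, e.g. "can_copy_p (GET_MODE (dest))", so
   expressions whose mode cannot be copied reg-to-reg never enter the hash
   table.  */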
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}
/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
	sbitmap_vector_zero (transp, last_basic_block);
      else
	sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, table->set_p);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
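/* Worked example (added commentary, not from the original sources): for a
   block containing

       insn 1:  r10 = r2 + r3
       insn 2:  r2 = ...
       insn 3:  r11 = r2 + r3

   the expression r2 + r3 is locally anticipatable (insn 1 is a first
   occurrence with no prior modification of r2 or r3), locally available
   (insn 3 computes it and nothing modifies r2 or r3 afterwards), but not
   transparent, since insn 2 kills r2 within the block.  */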
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use rtx_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = rtx_cost (x, SET, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static const_rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;

      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;

      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
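/* Illustrative note (added commentary): with AVAIL_P nonzero the walk above
   skips entries whose LUID is below UID_LIMIT, so only kills at or after
   the insn are seen; with AVAIL_P zero it skips entries above UID_LIMIT,
   seeing only earlier kills.  Hence UID_LIMIT = max_uid + 1 with AVAIL_P
   zero filters nothing and tests the whole block.  */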
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
		   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   travel.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
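/* Illustrative note (added commentary): since blocks are scanned from start
   to end, the antic_occr list keeps the first computation seen in a block
   (a later one in the same block is ignored), while avail_occr overwrites
   its insn with each later computation so the last one wins, matching the
   definitions in struct expr above.  */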
/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      /* Not used for set_p tables.  */
      cur_expr->max_distance = 0;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr
      && BLOCK_FOR_INSN (cur_occr->insn) == BLOCK_FOR_INSN (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}
/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && CONST_INT_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1)))
    return true;

  /* Consider a COMPARE of the same registers as a constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}
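/* Example (added commentary): (compare (reg 65) (reg 65)) in an integer
   mode always evaluates the same way, so it is treated as a constant here;
   the same COMPARE of floating point registers is not, since r == r can
   be false when r holds a NaN.  */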
/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0), NULL)))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || (tmp = next_nonnote_nondebug_insn (insn)) == NULL_RTX
		   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
		   || oprs_available_p (pat, tmp)))
	insert_set_in_table (pat, insn, table);
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}
/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If SET_P is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}
static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}
/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* re-Cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
		  record_last_reg_set_info (insn, regno);

	      mark_call (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
	  && implicit_sets[current_bb->index] != NULL_RTX)
	hash_scan_set (implicit_sets[current_bb->index],
		       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}
/* Allocate space for the set/expr hash TABLE.
   The current maximum insn count is used to determine the number of
   buckets to use.
   SET_P determines whether set or expression table will
   be created.  */

static void
alloc_hash_table (struct hash_table_d *table, int set_p)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
  table->set_p = set_p;
}
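/* Example (added commentary): a function with 1000 insns gets
   1000 / 4 = 250 buckets, bumped to 251 by the "|= 1" step so the bucket
   count is odd; functions with fewer than 44 insns get the 11-bucket
   minimum (11 is already odd).  */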
/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the hash TABLE for doing copy/const propagation or
   expression hash table.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table_d *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}
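/* Illustrative usage sketch (added commentary): walking every recorded set
   of a given pseudo in the copy propagation table looks like

     struct expr *set;
     for (set = lookup_set (regno, &set_hash_table);
	  set != NULL;
	  set = next_set (regno, set))
       ... examine SET_SRC (set->expr) ...

   Since entries for different registers can share a hash chain, both
   functions skip chain members whose SET_DEST is some other regno.  */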
/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
	free_EXPR_LIST_node (list);
      else
	free_INSN_LIST_node (list);
    }

  *listp = 0;
}

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
1907 /* Reset tables used to keep track of what's still available [since the
1908 start of the block]. */
1911 reset_opr_set_tables (void)
1913 /* Maintain a bitmap of which regs have been set since beginning of
1915 CLEAR_REG_SET (reg_set_bitmap
);
1917 /* Also keep a record of the last instruction to modify memory.
1918 For now this is very trivial, we only record whether any memory
1919 location has been modified. */
1920 clear_modify_mem_tables ();
/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (const_rtx x, const_rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
                                  DF_INSN_LUID (insn), x, 0))
        return 0;
      else
        return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            return oprs_not_set_p (XEXP (x, i), insn);

          if (! oprs_not_set_p (XEXP (x, i), insn))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
            return 0;
    }

  return 1;
}
/* Mark things set by a CALL.  */

static void
mark_call (rtx insn)
{
  if (! RTL_CONST_OR_PURE_CALL_P (insn))
    record_last_mem_set_info (insn);
}
/* Mark things set by a SET.  */

static void
mark_set (rtx pat, rtx insn)
{
  rtx dest = SET_DEST (pat);

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  if (REG_P (dest))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
  else if (MEM_P (dest))
    record_last_mem_set_info (insn);

  if (GET_CODE (SET_SRC (pat)) == CALL)
    mark_call (insn);
}
/* Record things set by a CLOBBER.  */

static void
mark_clobber (rtx pat, rtx insn)
{
  rtx clob = XEXP (pat, 0);

  while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
    clob = XEXP (clob, 0);

  if (REG_P (clob))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
  else
    record_last_mem_set_info (insn);
}
/* Record things set by INSN.
   This data is used by oprs_not_set_p.  */

static void
mark_oprs_set (rtx insn)
{
  rtx pat = PATTERN (insn);
  int i;

  if (GET_CODE (pat) == SET)
    mark_set (pat, insn);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx x = XVECEXP (pat, 0, i);

        if (GET_CODE (x) == SET)
          mark_set (x, insn);
        else if (GET_CODE (x) == CLOBBER)
          mark_clobber (x, insn);
        else if (GET_CODE (x) == CALL)
          mark_call (insn);
      }

  else if (GET_CODE (pat) == CLOBBER)
    mark_clobber (pat, insn);
  else if (GET_CODE (pat) == CALL)
    mark_call (insn);
}
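/* As an illustration (register numbers invented), an insn that sets a
   register and clobbers the flags arrives here as

     (parallel [(set (reg 100) (plus (reg 101) (reg 102)))
                (clobber (reg:CC 17))])

   and the PARALLEL loop above dispatches the SET to mark_set and the
   CLOBBER to mark_clobber, so both effects are recorded.  */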
/* Compute copy/constant propagation working variables.  */

/* Local properties of assignments.  */
static sbitmap *cprop_pavloc;
static sbitmap *cprop_absaltered;

/* Global properties of assignments (computed from the local properties).  */
static sbitmap *cprop_avin;
static sbitmap *cprop_avout;
/* Allocate vars used for copy/const propagation.  N_BLOCKS is the number of
   basic blocks.  N_SETS is the number of sets.  */

static void
alloc_cprop_mem (int n_blocks, int n_sets)
{
  cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);

  cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
}
/* Free vars used by copy/const propagation.  */

static void
free_cprop_mem (void)
{
  sbitmap_vector_free (cprop_pavloc);
  sbitmap_vector_free (cprop_absaltered);
  sbitmap_vector_free (cprop_avin);
  sbitmap_vector_free (cprop_avout);
}
/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
   bit in BMAP.  */

static void
compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (set_p)
        {
          df_ref def;
          for (def = DF_REG_DEF_CHAIN (REGNO (x));
               def;
               def = DF_REF_NEXT_REG (def))
            SET_BIT (bmap[DF_REF_BB (def)->index], indx);
        }
      else
        {
          df_ref def;
          for (def = DF_REG_DEF_CHAIN (REGNO (x));
               def;
               def = DF_REF_NEXT_REG (def))
            RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
        }

      return;

    case MEM:
      if (! MEM_READONLY_P (x))
        {
          bitmap_iterator bi;
          unsigned bb_index;

          /* First handle all the blocks with calls.  We don't need to
             do any list walking for them.  */
          EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
            {
              if (set_p)
                SET_BIT (bmap[bb_index], indx);
              else
                RESET_BIT (bmap[bb_index], indx);
            }

          /* Now iterate over the blocks which have memory modifications
             but which do not have any calls.  */
          EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
                                          blocks_with_calls,
                                          0, bb_index, bi)
            {
              rtx list_entry = canon_modify_mem_list[bb_index];

              while (list_entry)
                {
                  rtx dest, dest_addr;

                  /* LIST_ENTRY must be an INSN of some kind that sets memory.
                     Examine each hunk of memory that is modified.  */

                  dest = XEXP (list_entry, 0);
                  list_entry = XEXP (list_entry, 1);
                  dest_addr = XEXP (list_entry, 0);

                  if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
                                             x, NULL_RTX, rtx_addr_varies_p))
                    {
                      if (set_p)
                        SET_BIT (bmap[bb_index], indx);
                      else
                        RESET_BIT (bmap[bb_index], indx);
                      break;
                    }
                  list_entry = XEXP (list_entry, 1);
                }
            }
        }

      x = XEXP (x, 0);
      goto repeat;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, i);
              goto repeat;
            }

          compute_transp (XEXP (x, i), indx, bmap, set_p);
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
    }
}
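/* Transparency example (illustrative indices): for the expression
   (plus (reg 100) (const_int 4)) with INDX == 3, every block containing
   a definition of (reg 100) gets bit 3 cleared in its TRANSP row (with
   SET_P == 0), so LCM will not move the expression across that block;
   likewise, when the expression is a MEM, a block with a call or with an
   aliasing store clears the bit.  */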
/* Top level routine to do the dataflow analysis needed by copy/const
   propagation.  */

static void
compute_cprop_data (void)
{
  compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
  compute_available (cprop_pavloc, cprop_absaltered,
                     cprop_avout, cprop_avin);
}
/* Copy/constant propagation.  */

/* Maximum number of register uses in an insn that we handle.  */
#define MAX_USES 8

/* Table of uses found in an insn.
   Allocated statically to avoid alloc/free complexity and overhead.  */
static struct reg_use reg_use_table[MAX_USES];

/* Index into `reg_use_table' while building it.  */
static int reg_use_count;

/* Set up a list of register numbers used in INSN.  The found uses are stored
   in `reg_use_table'.  `reg_use_count' is initialized to zero before entry,
   and contains the number of uses in the table upon exit.

   ??? If a register appears multiple times we will record it multiple times.
   This doesn't hurt anything but it will slow things down.  */
static void
find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  rtx x = *xptr;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:
  if (x == 0)
    return;

  code = GET_CODE (x);
  if (REG_P (x))
    {
      if (reg_use_count == MAX_USES)
        return;

      reg_use_table[reg_use_count].reg_rtx = x;
      reg_use_count++;
    }

  /* Recursively scan the operands of this expression.  */

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }

          find_used_regs (&XEXP (x, i), data);
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          find_used_regs (&XVECEXP (x, i, j), data);
    }
}
/* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
   Returns nonzero if successful.  */

static int
try_replace_reg (rtx from, rtx to, rtx insn)
{
  rtx note = find_reg_equal_equiv_note (insn);
  rtx src = 0;
  int success = 0;
  rtx set = single_set (insn);

  /* Usually we substitute easy stuff, so we won't copy everything.
     We however need to take care to not duplicate non-trivial CONST
     expressions.  */
  to = copy_rtx (to);

  validate_replace_src_group (from, to, insn);
  if (num_changes_pending () && apply_change_group ())
    success = 1;

  /* Try to simplify SET_SRC if we have substituted a constant.  */
  if (success && set && CONSTANT_P (to))
    {
      src = simplify_rtx (SET_SRC (set));

      if (src)
        validate_change (insn, &SET_SRC (set), src, 0);
    }

  /* If there is already a REG_EQUAL note, update the expression in it
     with our replacement.  */
  if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
    set_unique_reg_note (insn, REG_EQUAL,
                         simplify_replace_rtx (XEXP (note, 0), from, to));
  if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
    {
      /* If above failed and this is a single set, try to simplify the source
         of the set given our substitution.  We could perhaps try this for
         multiple SETs, but it probably won't buy us anything.  */
      src = simplify_replace_rtx (SET_SRC (set), from, to);

      if (!rtx_equal_p (src, SET_SRC (set))
          && validate_change (insn, &SET_SRC (set), src, 0))
        success = 1;
    }

  /* If we've failed to perform the replacement, have a single SET to
     a REG destination and don't yet have a note, add a REG_EQUAL note
     to not lose information.  */
  if (!success && note == 0 && set != 0 && REG_P (SET_DEST (set)))
    note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));

  /* REG_EQUAL may get simplified into register.
     We don't allow that.  Remove that note.  This code ought
     not to happen, because previous code ought to synthesize
     reg-reg move, but be on the safe side.  */
  if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
    remove_note (insn, note);

  return success;
}
/* Find a set of REGNOs that are available on entry to INSN's block.  Returns
   NULL if no such set is found.  */

static struct expr *
find_avail_set (int regno, rtx insn)
{
  /* SET1 contains the last set found that can be returned to the caller for
     use in a substitution.  */
  struct expr *set1 = 0;

  /* Loops are not possible here.  To get a loop we would need two sets
     available at the start of the block containing INSN.  i.e. we would
     need two sets like this available at the start of the block:

       (set (reg X) (reg Y))
       (set (reg Y) (reg X))

     This can not happen since the set of (reg Y) would have killed the
     set of (reg X) making it unavailable at the start of this block.  */
  while (1)
    {
      rtx src;
      struct expr *set = lookup_set (regno, &set_hash_table);

      /* Find a set that is available at the start of the block
         which contains INSN.  */
      while (set)
        {
          if (TEST_BIT (cprop_avin[BLOCK_FOR_INSN (insn)->index],
                        set->bitmap_index))
            break;
          set = next_set (regno, set);
        }

      /* If no available set was found we've reached the end of the
         (possibly empty) copy chain.  */
      if (set == 0)
        break;

      gcc_assert (GET_CODE (set->expr) == SET);

      src = SET_SRC (set->expr);

      /* We know the set is available.
         Now check that SRC is ANTLOC (i.e. none of the source operands
         have changed since the start of the block).

         If the source operand changed, we may still use it for the next
         iteration of this loop, but we may not use it for substitutions.  */

      if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
        set1 = set;

      /* If the source of the set is anything except a register, then
         we have reached the end of the copy chain.  */
      if (! REG_P (src))
        break;

      /* Follow the copy chain, i.e. start another iteration of the loop
         and see if we have an available copy into SRC.  */
      regno = REGNO (src);
    }

  /* SET1 holds the last set that was available and anticipatable at
     INSN.  */
  return set1;
}
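/* Copy chain example (register numbers invented): given

     (set (reg 100) (const_int 5))     available at block entry
     (set (reg 101) (reg 100))         available at block entry

   a query for (reg 101) first finds the copy from (reg 100), then
   follows the chain to the set of (reg 100) from the constant 5.  That
   constant set is recorded last in SET1 and wins, so uses of (reg 101)
   can be replaced by 5 outright.  */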
/* Subroutine of cprop_insn that tries to propagate constants into
   JUMP_INSNS.  JUMP must be a conditional jump.  If SETCC is non-NULL
   it is the instruction that immediately precedes JUMP, and must be a
   single SET of a register.  FROM is what we will try to replace,
   SRC is the constant we will try to substitute for it.  Returns nonzero
   if a change was made.  */

static int
cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
{
  rtx new_rtx, set_src, note_src;
  rtx set = pc_set (jump);
  rtx note = find_reg_equal_equiv_note (jump);

  if (note)
    {
      note_src = XEXP (note, 0);
      if (GET_CODE (note_src) == EXPR_LIST)
        note_src = NULL_RTX;
    }
  else
    note_src = NULL_RTX;

  /* Prefer REG_EQUAL notes except those containing EXPR_LISTs.  */
  set_src = note_src ? note_src : SET_SRC (set);

  /* First substitute the SETCC condition into the JUMP instruction,
     then substitute that given values into this expanded JUMP.  */
  if (setcc != NULL_RTX
      && !modified_between_p (from, setcc, jump)
      && !modified_between_p (src, setcc, jump))
    {
      rtx setcc_src;
      rtx setcc_set = single_set (setcc);
      rtx setcc_note = find_reg_equal_equiv_note (setcc);
      setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
                  ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
      set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
                                      setcc_src);
    }
  else
    setcc = NULL_RTX;

  new_rtx = simplify_replace_rtx (set_src, from, src);

  /* If no simplification can be made, then try the next register.  */
  if (rtx_equal_p (new_rtx, SET_SRC (set)))
    return 0;

  /* If this is now a no-op delete it, otherwise this must be a valid insn.  */
  if (new_rtx == pc_rtx)
    delete_insn (jump);
  else
    {
      /* Ensure the value computed inside the jump insn to be equivalent
         to one computed by setcc.  */
      if (setcc && modified_in_p (new_rtx, setcc))
        return 0;
      if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
        {
          /* When (some) constants are not valid in a comparison, and there
             are two registers to be replaced by constants before the entire
             comparison can be folded into a constant, we need to keep
             intermediate information in REG_EQUAL notes.  For targets with
             separate compare insns, such notes are added by try_replace_reg.
             When we have a combined compare-and-branch instruction, however,
             we need to attach a note to the branch itself to make this
             optimization work.  */

          if (!rtx_equal_p (new_rtx, note_src))
            set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
          return 0;
        }

      /* Remove REG_EQUAL note after simplification.  */
      if (note_src)
        remove_note (jump, note);
    }

#ifdef HAVE_cc0
  /* Delete the cc0 setter.  */
  if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
    delete_insn (setcc);
#endif

  global_const_prop_count++;
  if (dump_file != NULL)
    {
      fprintf (dump_file,
               "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
               REGNO (from), INSN_UID (jump));
      print_rtl (dump_file, src);
      fprintf (dump_file, "\n");
    }
  purge_dead_edges (bb);

  /* If a conditional jump has been changed into unconditional jump, remove
     the jump and make the edge fallthru - this is always called in
     cfglayout mode.  */
  if (new_rtx != pc_rtx && simplejump_p (jump))
    {
      edge e;
      edge_iterator ei;

      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
        if (e->dest != EXIT_BLOCK_PTR
            && BB_HEAD (e->dest) == JUMP_LABEL (jump))
          {
            e->flags |= EDGE_FALLTHRU;
            break;
          }
      delete_insn (jump);
    }

  return 1;
}
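/* Example of the transformation (illustrative RTL): with SETCC

     (set (reg:CC 17) (compare (reg 100) (const_int 0)))

   followed by JUMP

     (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
                             (label_ref 23) (pc)))

   once (reg 100) is known to be 0, the substituted condition simplifies
   to (label_ref 23): the branch becomes unconditional, dead edges are
   purged, and the now-simple jump can be removed with its remaining
   edge marked EDGE_FALLTHRU.  */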
static int
constprop_register (rtx insn, rtx from, rtx to)
{
  rtx sset;

  /* Check for reg or cc0 setting instructions followed by
     conditional branch instructions first.  */
  if ((sset = single_set (insn)) != NULL
      && NEXT_INSN (insn)
      && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
    {
      rtx dest = SET_DEST (sset);
      if ((REG_P (dest) || CC0_P (dest))
          && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
        return 1;
    }

  /* Handle normal insns next.  */
  if (NONJUMP_INSN_P (insn)
      && try_replace_reg (from, to, insn))
    return 1;

  /* Try to propagate a CONST_INT into a conditional jump.
     We're pretty specific about what we will handle in this
     code, we can extend this as necessary over time.

     Right now the insn in question must look like
     (set (pc) (if_then_else ...))  */
  else if (any_condjump_p (insn) && onlyjump_p (insn))
    return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
  return 0;
}
/* Perform constant and copy propagation on INSN.
   The result is nonzero if a change was made.  */

static int
cprop_insn (rtx insn)
{
  struct reg_use *reg_used;
  int changed = 0;
  rtx note;

  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);

  note = find_reg_equal_equiv_note (insn);

  /* We may win even when propagating constants into notes.  */
  if (note)
    find_used_regs (&XEXP (note, 0), NULL);

  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
       reg_used++, reg_use_count--)
    {
      unsigned int regno = REGNO (reg_used->reg_rtx);
      rtx pat, src;
      struct expr *set;

      /* If the register has already been set in this block, there's
         nothing we can do.  */
      if (! oprs_not_set_p (reg_used->reg_rtx, insn))
        continue;

      /* Find an assignment that sets reg_used and is available
         at the start of the block.  */
      set = find_avail_set (regno, insn);
      if (! set)
        continue;

      pat = set->expr;
      /* ??? We might be able to handle PARALLELs.  Later.  */
      gcc_assert (GET_CODE (pat) == SET);

      src = SET_SRC (pat);

      /* Constant propagation.  */
      if (gcse_constant_p (src))
        {
          if (constprop_register (insn, reg_used->reg_rtx, src))
            {
              changed = 1;
              global_const_prop_count++;
              if (dump_file != NULL)
                {
                  fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
                  fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
                  print_rtl (dump_file, src);
                  fprintf (dump_file, "\n");
                }
              if (INSN_DELETED_P (insn))
                return 1;
            }
        }
      else if (REG_P (src)
               && REGNO (src) >= FIRST_PSEUDO_REGISTER
               && REGNO (src) != regno)
        {
          if (try_replace_reg (reg_used->reg_rtx, src, insn))
            {
              changed = 1;
              global_copy_prop_count++;
              if (dump_file != NULL)
                {
                  fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
                           regno, INSN_UID (insn));
                  fprintf (dump_file, " with reg %d\n", REGNO (src));
                }

              /* The original insn setting reg_used may or may not now be
                 deletable.  We leave the deletion to flow.  */
              /* FIXME: If it turns out that the insn isn't deletable,
                 then we may have unnecessarily extended register lifetimes
                 and made things worse.  */
            }
        }
    }

  if (changed && DEBUG_INSN_P (insn))
    return 0;

  return changed;
}
/* Like find_used_regs, but avoid recording uses that appear in
   input-output contexts such as zero_extract or pre_dec.  This
   restricts the cases we consider to those for which local cprop
   can legitimately make replacements.  */

static void
local_cprop_find_used_regs (rtx *xptr, void *data)
{
  rtx x = *xptr;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case STRICT_LOW_PART:
      return;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Can only legitimately appear this early in the context of
         stack pushes for function arguments, but handle all of the
         codes nonetheless.  */
      return;

    case SUBREG:
      /* Setting a subreg of a register larger than word_mode leaves
         the non-written words unchanged.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
        return;
      break;

    default:
      break;
    }

  find_used_regs (xptr, data);
}
/* Try to perform local const/copy propagation on X in INSN.  */

static bool
do_local_cprop (rtx x, rtx insn)
{
  rtx newreg = NULL, newcnst = NULL;

  /* Rule out USE instructions and ASM statements as we don't want to
     change the hard registers mentioned.  */
  if (REG_P (x)
      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
          || (GET_CODE (PATTERN (insn)) != USE
              && asm_noperands (PATTERN (insn)) < 0)))
    {
      cselib_val *val = cselib_lookup (x, GET_MODE (x), 0, VOIDmode);
      struct elt_loc_list *l;

      if (!val)
        return false;
      for (l = val->locs; l; l = l->next)
        {
          rtx this_rtx = l->loc;
          rtx note;

          if (gcse_constant_p (this_rtx))
            newcnst = this_rtx;
          if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
              /* Don't copy propagate if it has attached REG_EQUIV note.
                 At this point only function parameters should have
                 REG_EQUIV notes and if the argument slot is used somewhere
                 explicitly, it means the address of the parameter has been
                 taken, so we should not extend the lifetime of the pseudo.  */
              && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
                  || ! MEM_P (XEXP (note, 0))))
            newreg = this_rtx;
        }
      if (newcnst && constprop_register (insn, x, newcnst))
        {
          if (dump_file != NULL)
            {
              fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
                       REGNO (x));
              fprintf (dump_file, "insn %d with constant ",
                       INSN_UID (insn));
              print_rtl (dump_file, newcnst);
              fprintf (dump_file, "\n");
            }
          local_const_prop_count++;
          return true;
        }
      else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
        {
          if (dump_file != NULL)
            {
              fprintf (dump_file,
                       "LOCAL COPY-PROP: Replacing reg %d in insn %d",
                       REGNO (x), INSN_UID (insn));
              fprintf (dump_file, " with reg %d\n", REGNO (newreg));
            }
          local_copy_prop_count++;
          return true;
        }
    }
  return false;
}
/* Do local const/copy propagation (i.e. within each basic block).  */

static int
local_cprop_pass (void)
{
  basic_block bb;
  rtx insn;
  struct reg_use *reg_used;
  bool changed = false;

  cselib_init (0);
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (INSN_P (insn))
            {
              rtx note = find_reg_equal_equiv_note (insn);
              do
                {
                  reg_use_count = 0;
                  note_uses (&PATTERN (insn), local_cprop_find_used_regs,
                             NULL);
                  if (note)
                    local_cprop_find_used_regs (&XEXP (note, 0), NULL);

                  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
                       reg_used++, reg_use_count--)
                    if (do_local_cprop (reg_used->reg_rtx, insn))
                      {
                        changed = true;
                        break;
                      }
                  if (INSN_DELETED_P (insn))
                    break;
                }
              while (reg_use_count);
            }
          cselib_process_insn (insn);
        }

      /* Forget everything at the end of a basic block.  */
      cselib_clear_table ();
    }

  cselib_finish ();

  return changed;
}
/* Similar to get_condition, only the resulting condition must be
   valid at JUMP, instead of at EARLIEST.

   This differs from noce_get_condition in ifcvt.c in that we prefer not to
   settle for the condition variable in the jump instruction being integral.
   We prefer to be able to record the value of a user variable, rather than
   the value of a temporary used in a condition.  This could be solved by
   recording the value of *every* register scanned by canonicalize_condition,
   but this would require some code reorganization.  */

static rtx
fis_get_condition (rtx jump)
{
  return get_condition (jump, NULL, false, true);
}
/* Check the comparison COND to see if we can safely form an implicit set from
   it.  COND is either an EQ or NE comparison.  */

static bool
implicit_set_cond_p (const_rtx cond)
{
  const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
  const_rtx cst = XEXP (cond, 1);

  /* We can't perform this optimization if either operand might be or might
     contain a signed zero.  */
  if (HONOR_SIGNED_ZEROS (mode))
    {
      /* It is sufficient to check if CST is or contains a zero.  We must
         handle float, complex, and vector.  If any subpart is a zero, then
         the optimization can't be performed.  */
      /* ??? The complex and vector checks are not implemented yet.  We just
         always return zero for them.  */
      if (GET_CODE (cst) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
          if (REAL_VALUES_EQUAL (d, dconst0))
            return 0;
        }
      else
        return 0;
    }

  return gcse_constant_p (cst);
}
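/* Example of the signed zero hazard (illustrative): for "if (d == 0.0)"
   with d an IEEE double, d may be -0.0 on the taken branch, and -0.0
   compares equal to +0.0.  Recording the implicit set d = +0.0 would
   silently change the sign of d, so a zero CONST_DOUBLE makes us refuse
   whenever HONOR_SIGNED_ZEROS is true for the mode.  */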
/* Find the implicit sets of a function.  An "implicit set" is a constraint
   on the value of a variable, implied by a conditional jump.  For example,
   following "if (x == 2)", the then branch may be optimized as though the
   conditional performed an "explicit set", in this example, "x = 2".  This
   function records the set patterns that are implicit at the start of each
   basic block.

   FIXME: This would be more effective if critical edges are pre-split.  As
          it is now, we can't record implicit sets for blocks that have
          critical successor edges.  This results in missed optimizations
          and in more (unnecessary) work in cfgcleanup.c:thread_jump().  */

static void
find_implicit_sets (void)
{
  basic_block bb, dest;
  unsigned int count;
  rtx cond, new_rtx;

  count = 0;
  FOR_EACH_BB (bb)
    /* Check for more than one successor.  */
    if (EDGE_COUNT (bb->succs) > 1)
      {
        cond = fis_get_condition (BB_END (bb));

        if (cond
            && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
            && REG_P (XEXP (cond, 0))
            && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
            && implicit_set_cond_p (cond))
          {
            dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
                                         : FALLTHRU_EDGE (bb)->dest;

            if (dest
                /* Record nothing for a critical edge.  */
                && single_pred_p (dest)
                && dest != EXIT_BLOCK_PTR)
              {
                new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
                                       XEXP (cond, 1));
                implicit_sets[dest->index] = new_rtx;
                if (dump_file)
                  {
                    fprintf (dump_file, "Implicit set of reg %d in ",
                             REGNO (XEXP (cond, 0)));
                    fprintf (dump_file, "basic block %d\n", dest->index);
                  }
                count++;
              }
          }
      }

  if (dump_file)
    fprintf (dump_file, "Found %d implicit sets\n", count);
}
/* Bypass conditional jumps.  */

/* The value of last_basic_block at the beginning of the jump_bypass
   pass.  The use of redirect_edge_and_branch_force may introduce new
   basic blocks, but the data flow analysis is only valid for basic
   block indices less than bypass_last_basic_block.  */

static int bypass_last_basic_block;
/* Find a set of REGNO to a constant that is available at the end of basic
   block BB.  Returns NULL if no such set is found.  Based heavily upon
   find_avail_set.  */

static struct expr *
find_bypass_set (int regno, int bb)
{
  struct expr *result = 0;

  for (;;)
    {
      rtx src;
      struct expr *set = lookup_set (regno, &set_hash_table);

      while (set)
        {
          if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
            break;
          set = next_set (regno, set);
        }

      if (set == 0)
        break;

      gcc_assert (GET_CODE (set->expr) == SET);

      src = SET_SRC (set->expr);
      if (gcse_constant_p (src))
        result = set;

      if (! REG_P (src))
        break;

      regno = REGNO (src);
    }
  return result;
}
/* Subroutine of bypass_block that checks whether a pseudo is killed by
   any of the instructions inserted on an edge.  Jump bypassing places
   condition code setters on CFG edges using insert_insn_on_edge.  This
   function is required to check that our data flow analysis is still
   valid prior to commit_edge_insertions.  */

static bool
reg_killed_on_edge (const_rtx reg, const_edge e)
{
  rtx insn;

  for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return true;

  return false;
}
/* Subroutine of bypass_conditional_jumps that attempts to bypass the given
   basic block BB which has more than one predecessor.  If not NULL, SETCC
   is the first instruction of BB, which is immediately followed by JUMP_INSN
   JUMP.  Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
   Returns nonzero if a change was made.

   During the jump bypassing pass, we may place copies of SETCC instructions
   on CFG edges.  The following routine must be careful to pay attention to
   these inserted insns when performing its transformations.  */

static int
bypass_block (basic_block bb, rtx setcc, rtx jump)
{
  rtx insn, note;
  edge e, edest;
  int i, change;
  int may_be_loop_header;
  unsigned removed_p;
  edge_iterator ei;

  insn = (setcc != NULL) ? setcc : jump;

  /* Determine set of register uses in INSN.  */
  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);
  note = find_reg_equal_equiv_note (insn);
  if (note)
    find_used_regs (&XEXP (note, 0), NULL);

  may_be_loop_header = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_DFS_BACK)
      {
        may_be_loop_header = true;
        break;
      }

  change = 0;
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      removed_p = 0;

      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      /* We can't redirect edges from new basic blocks.  */
      if (e->src->index >= bypass_last_basic_block)
        {
          ei_next (&ei);
          continue;
        }

      /* The irreducible loops created by redirecting of edges entering the
         loop from outside would decrease effectiveness of some of the
         following optimizations, so prevent this.  */
      if (may_be_loop_header
          && !(e->flags & EDGE_DFS_BACK))
        {
          ei_next (&ei);
          continue;
        }

      for (i = 0; i < reg_use_count; i++)
        {
          struct reg_use *reg_used = &reg_use_table[i];
          unsigned int regno = REGNO (reg_used->reg_rtx);
          basic_block dest, old_dest;
          struct expr *set;
          rtx src, new_rtx;

          set = find_bypass_set (regno, e->src->index);

          if (! set)
            continue;

          /* Check the data flow is valid after edge insertions.  */
          if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
            continue;

          src = SET_SRC (pc_set (jump));

          if (setcc != NULL)
            src = simplify_replace_rtx (src,
                                        SET_DEST (PATTERN (setcc)),
                                        SET_SRC (PATTERN (setcc)));

          new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
                                          SET_SRC (set->expr));

          /* Jump bypassing may have already placed instructions on
             edges of the CFG.  We can't bypass an outgoing edge that
             has instructions associated with it, as these insns won't
             get executed if the incoming edge is redirected.  */

          if (new_rtx == pc_rtx)
            {
              edest = FALLTHRU_EDGE (bb);
              dest = edest->insns.r ? NULL : edest->dest;
            }
          else if (GET_CODE (new_rtx) == LABEL_REF)
            {
              dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
              /* Don't bypass edges containing instructions.  */
              edest = find_edge (bb, dest);
              if (edest && edest->insns.r)
                dest = NULL;
            }
          else
            dest = NULL;

          /* Avoid unification of the edge with other edges from original
             branch.  We would end up emitting the instruction on "both"
             edges.  */

          if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
              && find_edge (e->src, dest))
            dest = NULL;

          old_dest = e->dest;
          if (dest != NULL
              && dest != old_dest
              && dest != EXIT_BLOCK_PTR)
            {
              redirect_edge_and_branch_force (e, dest);

              /* Copy the register setter to the redirected edge.
                 Don't copy CC0 setters, as CC0 is dead after jump.  */
              if (setcc)
                {
                  rtx pat = PATTERN (setcc);
                  if (!CC0_P (SET_DEST (pat)))
                    insert_insn_on_edge (copy_insn (pat), e);
                }

              if (dump_file != NULL)
                {
                  fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
                                      "in jump_insn %d equals constant ",
                           regno, INSN_UID (jump));
                  print_rtl (dump_file, SET_SRC (set->expr));
                  fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
                           e->src->index, old_dest->index, dest->index);
                }
              change = 1;
              removed_p = 1;
              break;
            }
        }
      if (!removed_p)
        ei_next (&ei);
    }
  return change;
}
/* Find basic blocks with more than one predecessor that only contain a
   single conditional jump.  If the result of the comparison is known at
   compile-time from any incoming edge, redirect that edge to the
   appropriate target.  Returns nonzero if a change was made.

   This function is now mis-named, because we also handle indirect jumps.  */

static int
bypass_conditional_jumps (void)
{
  basic_block bb;
  int changed;
  rtx setcc;
  rtx insn;
  rtx dest;

  /* Note we start at block 1.  */
  if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
    return 0;

  bypass_last_basic_block = last_basic_block;
  mark_dfs_back_edges ();

  changed = 0;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
                  EXIT_BLOCK_PTR, next_bb)
    {
      /* Check for more than one predecessor.  */
      if (!single_pred_p (bb))
        {
          setcc = NULL_RTX;
          FOR_BB_INSNS (bb, insn)
            if (DEBUG_INSN_P (insn))
              continue;
            else if (NONJUMP_INSN_P (insn))
              {
                if (setcc)
                  break;
                if (GET_CODE (PATTERN (insn)) != SET)
                  break;

                dest = SET_DEST (PATTERN (insn));
                if (REG_P (dest) || CC0_P (dest))
                  setcc = insn;
                else
                  break;
              }
            else if (JUMP_P (insn))
              {
                if ((any_condjump_p (insn) || computed_jump_p (insn))
                    && onlyjump_p (insn))
                  changed |= bypass_block (bb, setcc, insn);
                break;
              }
            else if (INSN_P (insn))
              break;
        }
    }

  /* If we bypassed any register setting insns, we inserted a
     copy on the redirected edge.  These need to be committed.  */
  if (changed)
    commit_edge_insertions ();

  return changed;
}
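/* A small bypassing example (illustrative): if block B3 contains only

     (set (reg:CC 17) (compare (reg 100) (const_int 10)))
     (set (pc) (if_then_else (lt (reg:CC 17) (const_int 0)) ...))

   and one incoming edge provides (reg 100) = 3 in cprop_avout, the
   condition folds at compile time for that edge alone: the edge is
   redirected straight to the "less than" target, and a copy of the
   compare is placed on the redirected edge so the condition register
   still holds the right value for any later consumer.  */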
/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */

/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;

/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;

/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;

/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;

/* Contains the edge_list returned by pre_edge_lcm.  */
static struct edge_list *edge_list;
/* Allocate vars used for PRE analysis.  */

static void
alloc_pre_mem (int n_blocks, int n_exprs)
{
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);

  /* pre_insert and pre_delete are allocated later.  */
}
/* Free vars used for PRE analysis.  */

static void
free_pre_mem (void)
{
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */

  if (pre_optimal)
    sbitmap_vector_free (pre_optimal);
  if (pre_redundant)
    sbitmap_vector_free (pre_redundant);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  transp = comp = NULL;
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
}
/* Remove certain expressions from anticipatable and transparent
   sets of basic blocks that have incoming abnormal edge.
   For PRE remove potentially trapping expressions to avoid placing
   them on abnormal edges.  For hoisting remove memory references that
   can be clobbered by calls.  */

static void
prune_expressions (bool pre_p)
{
  sbitmap prune_exprs;
  struct expr *expr;
  unsigned int ui;
  basic_block bb;

  prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
  sbitmap_zero (prune_exprs);
  for (ui = 0; ui < expr_hash_table.size; ui++)
    {
      for (expr = expr_hash_table.table[ui]; expr != NULL; expr = expr->next_same_hash)
        {
          /* Note potentially trapping expressions.  */
          if (may_trap_p (expr->expr))
            {
              SET_BIT (prune_exprs, expr->bitmap_index);
              continue;
            }

          if (!pre_p && MEM_P (expr->expr))
            /* Note memory references that can be clobbered by a call.
               We do not split abnormal edges in hoisting, so would
               a memory reference get hoisted along an abnormal edge,
               it would be placed /before/ the call.  Therefore, only
               constant memory references can be hoisted along abnormal
               edges.  */
            {
              if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
                  && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
                continue;

              if (MEM_READONLY_P (expr->expr)
                  && !MEM_VOLATILE_P (expr->expr)
                  && MEM_NOTRAP_P (expr->expr))
                /* Constant memory reference, e.g., a PIC address.  */
                continue;

              /* ??? Optimally, we would use interprocedural alias
                 analysis to determine if this mem is actually killed
                 by this call.  */

              SET_BIT (prune_exprs, expr->bitmap_index);
            }
        }
    }

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      /* If the current block is the destination of an abnormal edge, we
         kill all trapping (for PRE) and memory (for hoist) expressions
         because we won't be able to properly place the instruction on
         the edge.  So make them neither anticipatable nor transparent.
         This is fairly conservative.

         ??? For hoisting it may be necessary to check for set-and-jump
         instructions here, not just for abnormal edges.  The general problem
         is that when an expression cannot be placed right at the end of
         a basic block we should account for any side-effects of a subsequent
         jump instruction that could clobber the expression.  It would
         be best to implement this check along the lines of
         hoist_expr_reaches_here_p where the target block is already known
         and, hence, there's no need to conservatively prune expressions on
         "intermediate" set-and-jump instructions.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if ((e->flags & EDGE_ABNORMAL)
            && (pre_p || CALL_P (BB_END (e->src))))
          {
            sbitmap_difference (antloc[bb->index],
                                antloc[bb->index], prune_exprs);
            sbitmap_difference (transp[bb->index],
                                transp[bb->index], prune_exprs);
            break;
          }
    }

  sbitmap_free (prune_exprs);
}
/* It may be necessary to insert a large number of insns on edges to
   make the existing occurrences of expressions fully redundant.  This
   routine examines the set of insertions and deletions and if the ratio
   of insertions to deletions is too high for a particular expression, then
   the expression is removed from the insertion/deletion sets.

   N_ELEMS is the number of elements in the hash table.  */

static void
prune_insertions_deletions (int n_elems)
{
  sbitmap_iterator sbi;
  sbitmap prune_exprs;

  /* We always use I to iterate over blocks/edges and J to iterate over
     expressions.  */
  unsigned int i, j;

  /* Counts for the number of times an expression needs to be inserted and
     number of times an expression can be removed as a result.  */
  int *insertions = GCNEWVEC (int, n_elems);
  int *deletions = GCNEWVEC (int, n_elems);

  /* Set of expressions which require too many insertions relative to
     the number of deletions achieved.  We will prune these out of the
     insertion/deletion sets.  */
  prune_exprs = sbitmap_alloc (n_elems);
  sbitmap_zero (prune_exprs);

  /* Iterate over the edges counting the number of times each expression
     needs to be inserted.  */
  for (i = 0; i < (unsigned) n_edges; i++)
    {
      EXECUTE_IF_SET_IN_SBITMAP (pre_insert_map[i], 0, j, sbi)
        insertions[j]++;
    }

  /* Similarly for deletions, but those occur in blocks rather than on
     edges.  */
  for (i = 0; i < (unsigned) last_basic_block; i++)
    {
      EXECUTE_IF_SET_IN_SBITMAP (pre_delete_map[i], 0, j, sbi)
        deletions[j]++;
    }

  /* Now that we have accurate counts, iterate over the elements in the
     hash table and see if any need too many insertions relative to the
     number of evaluations that can be removed.  If so, mark them in
     PRUNE_EXPRS.  */
  for (j = 0; j < (unsigned) n_elems; j++)
    if (deletions[j]
        && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
      SET_BIT (prune_exprs, j);

  /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
  EXECUTE_IF_SET_IN_SBITMAP (prune_exprs, 0, j, sbi)
    {
      for (i = 0; i < (unsigned) n_edges; i++)
        RESET_BIT (pre_insert_map[i], j);

      for (i = 0; i < (unsigned) last_basic_block; i++)
        RESET_BIT (pre_delete_map[i], j);
    }

  sbitmap_free (prune_exprs);
  free (insertions);
  free (deletions);
}
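/* Worked example of the pruning rule (values invented): if expression 5
   would have to be inserted on 42 edges to delete just 2 occurrences,
   the ratio 42 / 2 = 21 exceeds a MAX_GCSE_INSERTION_RATIO of, say, 20,
   so bit 5 is cleared from every row of PRE_INSERT_MAP and
   PRE_DELETE_MAP and the expression is left alone.  */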
/* Top level routine to do the dataflow analysis needed by PRE.  */

static void
compute_pre_data (void)
{
  basic_block bb;

  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (true);
  sbitmap_vector_zero (ae_kill, last_basic_block);

  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)
  */

  FOR_EACH_BB (bb)
    {
      sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
      sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
    }

  edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
                            ae_kill, &pre_insert_map, &pre_delete_map);
  sbitmap_vector_free (antloc);
  antloc = NULL;
  sbitmap_vector_free (ae_kill);
  ae_kill = NULL;

  prune_insertions_deletions (expr_hash_table.n_elems);
}
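/* AE_KILL example (illustrative): a block that neither recomputes an
   expression (COMP == 0) nor leaves it untouched (TRANSP == 0), e.g. one
   that clobbers an operand without re-evaluating the expression, gets
   ae_kill = ~(0 | 0) = 1 for that index, terminating availability there
   for the pre_edge_lcm computation.  */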
/* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
   block BB.

   VISITED is a pointer to a working buffer for tracking which BB's have
   been visited.  It is NULL for the top-level call.

   We treat reaching expressions that go through blocks containing the same
   reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
   2 as not reaching.  The intent is to improve the probability of finding
   only one reaching expression and to reduce register lifetimes by picking
   the closest such expression.  */

static int
pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
                              basic_block bb, char *visited)
{
  edge pred;
  edge_iterator ei;

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR
          /* Has this predecessor already been visited?  */
          || visited[pred_bb->index])
        ;/* Nothing to do.  */

      /* Does this predecessor generate this expression?  */
      else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
        {
          /* Is this the occurrence we're looking for?
             Note that there's only one generating occurrence per block
             so we just need to check the block number.  */
          if (occr_bb == pred_bb)
            return 1;

          visited[pred_bb->index] = 1;
        }
      /* Ignore this predecessor if it kills the expression.  */
      else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
        visited[pred_bb->index] = 1;

      /* Neither gen nor kill.  */
      else
        {
          visited[pred_bb->index] = 1;
          if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
            return 1;
        }
    }

  /* All paths have been checked.  */
  return 0;
}
/* The wrapper for pre_expr_reaches_here_work that ensures that any
   memory allocated for that function is returned.  */

static int
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
{
  int rval;
  char *visited = XCNEWVEC (char, last_basic_block);

  rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);

  free (visited);
  return rval;
}
/* Given an expr, generate RTL which we can insert at the end of a BB,
   or on an edge.  Set the block number of any insns generated to
   the value of BB.  */

static rtx
process_insert_insn (struct expr *expr)
{
  rtx reg = expr->reaching_reg;
  rtx exp = copy_rtx (expr->expr);
  rtx pat;

  start_sequence ();

  /* If the expression is something that's an operand, like a constant,
     just copy it to a register.  */
  if (general_operand (exp, GET_MODE (reg)))
    emit_move_insn (reg, exp);

  /* Otherwise, make a new insn to compute this expression and make sure the
     insn will be recognized (this also adds any needed CLOBBERs).  Copy the
     expression to make sure we don't have any sharing issues.  */
  else
    {
      rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));

      if (insn_invalid_p (insn))
        gcc_unreachable ();
    }

  pat = get_insns ();
  end_sequence ();

  return pat;
}
/* Add EXPR to the end of basic block BB.

   This is used by both the PRE and code hoisting.  */

static void
insert_insn_end_basic_block (struct expr *expr, basic_block bb)
{
  rtx insn = BB_END (bb);
  rtx new_insn;
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  rtx pat, pat_end;

  pat = process_insert_insn (expr);
  gcc_assert (pat && INSN_P (pat));

  pat_end = pat;
  while (NEXT_INSN (pat_end) != NULL_RTX)
    pat_end = NEXT_INSN (pat_end);

  /* If the last insn is a jump, insert EXPR in front [taking care to
     handle cc0, etc. properly].  Similarly we need to take care of
     trapping instructions in presence of non-call exceptions.  */

  if (JUMP_P (insn)
      || (NONJUMP_INSN_P (insn)
          && (!single_succ_p (bb)
              || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
    {
#ifdef HAVE_cc0
      rtx note;
#endif

      /* If this is a jump table, then we can't insert stuff here.  Since
         we know the previous real insn must be the tablejump, we insert
         the new instruction just before the tablejump.  */
      if (GET_CODE (PATTERN (insn)) == ADDR_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
        insn = prev_active_insn (insn);

#ifdef HAVE_cc0
      /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
         if cc0 isn't set.  */
      note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
      if (note)
        insn = XEXP (note, 0);
      else
        {
          rtx maybe_cc0_setter = prev_nonnote_insn (insn);
          if (maybe_cc0_setter
              && INSN_P (maybe_cc0_setter)
              && sets_cc0_p (PATTERN (maybe_cc0_setter)))
            insn = maybe_cc0_setter;
        }
#endif
      /* FIXME: What if something in cc0/jump uses value set in new insn?  */
      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }

  /* Likewise if the last insn is a call, as will happen in the presence
     of exception handling.  */
  else if (CALL_P (insn)
           && (!single_succ_p (bb)
               || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    {
      /* Keeping in mind targets with small register classes and parameters
         in registers, we search backward and place the instructions before
         the first parameter is loaded.  Do this for everyone for consistency
         and a presumption that we'll get better code elsewhere as well.  */

      /* Since different machines initialize their parameter registers
         in different orders, assume nothing.  Collect the set of all
         parameter registers.  */
      insn = find_first_parameter_load (insn, BB_HEAD (bb));

      /* If we found all the parameter loads, then we want to insert
         before the first parameter load.

         If we did not find all the parameter loads, then we might have
         stopped on the head of the block, which could be a CODE_LABEL.
         If we inserted before the CODE_LABEL, then we would be putting
         the insn in the wrong basic block.  In that case, put the insn
         after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
      while (LABEL_P (insn)
             || NOTE_INSN_BASIC_BLOCK_P (insn))
        insn = NEXT_INSN (insn);

      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }
  else
    new_insn = emit_insn_after_noloc (pat, insn, bb);

  while (1)
    {
      if (INSN_P (pat))
        add_label_notes (PATTERN (pat), new_insn);
      if (pat == pat_end)
        break;
      pat = NEXT_INSN (pat);
    }

  gcse_create_count++;

  if (dump_file)
    {
      fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
               bb->index, INSN_UID (new_insn));
      fprintf (dump_file, "copying expression %d to reg %d\n",
               expr->bitmap_index, regno);
    }
}
/* Insert partially redundant expressions on edges in the CFG to make
   the expressions fully redundant.  */

static int
pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
{
  int e, i, j, num_edges, set_size, did_insert = 0;
  sbitmap *inserted;

  /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
     if it reaches any of the deleted expressions.  */

  set_size = pre_insert_map[0]->size;
  num_edges = NUM_EDGES (edge_list);
  inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
  sbitmap_vector_zero (inserted, num_edges);

  for (e = 0; e < num_edges; e++)
    {
      int indx;
      basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);

      for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
        {
          SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];

          for (j = indx; insert && j < (int) expr_hash_table.n_elems;
               j++, insert >>= 1)
            if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
              {
                struct expr *expr = index_map[j];
                struct occr *occr;

                /* Now look at each deleted occurrence of this expression.  */
                for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
                  {
                    if (! occr->deleted_p)
                      continue;

                    /* Insert this expression on this edge if it would
                       reach the deleted occurrence in BB.  */
                    if (!TEST_BIT (inserted[e], j))
                      {
                        rtx insn;
                        edge eg = INDEX_EDGE (edge_list, e);

                        /* We can't insert anything on an abnormal and
                           critical edge, so we insert the insn at the end of
                           the previous block.  There are several alternatives
                           detailed in Morgans book P277 (sec 10.5) for
                           handling this situation.  This one is easiest for
                           now.  */

                        if (eg->flags & EDGE_ABNORMAL)
                          insert_insn_end_basic_block (index_map[j], bb);
                        else
                          {
                            insn = process_insert_insn (index_map[j]);
                            insert_insn_on_edge (insn, eg);
                          }

                        if (dump_file)
                          {
                            fprintf (dump_file, "PRE: edge (%d,%d), ",
                                     bb->index,
                                     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
                            fprintf (dump_file, "copy expression %d\n",
                                     expr->bitmap_index);
                          }

                        update_ld_motion_stores (expr);
                        SET_BIT (inserted[e], j);
                        did_insert = 1;
                        gcse_create_count++;
                      }
                  }
              }
        }
    }

  sbitmap_vector_free (inserted);
  return did_insert;
}
/* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
   Given "old_reg <- expr" (INSN), instead of adding after it
     reaching_reg <- old_reg
   it's better to do the following:
     reaching_reg <- expr
     old_reg      <- reaching_reg
   because this way copy propagation can discover additional PRE
   opportunities.  But if this fails, we try the old way.
   When "expr" is a store, i.e.
   given "MEM <- old_reg", instead of adding after it
     reaching_reg <- old_reg
   it's better to add it before as follows:
     reaching_reg <- old_reg
     MEM          <- reaching_reg.  */

static void
pre_insert_copy_insn (struct expr *expr, rtx insn)
{
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  int indx = expr->bitmap_index;
  rtx pat = PATTERN (insn);
  rtx set, first_set, new_insn;
  rtx old_reg = NULL_RTX;
  int i;

  /* This block matches the logic in hash_scan_insn.  */
  switch (GET_CODE (pat))
    {
    case SET:
      set = pat;
      break;

    case PARALLEL:
      /* Search through the parallel looking for the set whose
         source was the expression that we're interested in.  */
      first_set = NULL_RTX;
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              /* If the source was a REG_EQUAL or REG_EQUIV note, we
                 may not find an equivalent expression, but in this
                 case the PARALLEL will have a single set.  */
              if (first_set == NULL_RTX)
                first_set = x;
              if (expr_equiv_p (SET_SRC (x), expr->expr))
                {
                  set = x;
                  break;
                }
            }
        }

      gcc_assert (first_set);
      if (set == NULL_RTX)
        set = first_set;
      break;

    default:
      gcc_unreachable ();
    }

  if (REG_P (SET_DEST (set)))
    {
      old_reg = SET_DEST (set);
      /* Check if we can modify the set destination in the original insn.  */
      if (validate_change (insn, &SET_DEST (set), reg, 0))
        {
          new_insn = gen_move_insn (old_reg, reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
      else
        {
          new_insn = gen_move_insn (reg, old_reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
    }
  else /* This is possible only in case of a store to memory.  */
    {
      old_reg = SET_SRC (set);
      new_insn = gen_move_insn (reg, old_reg);

      /* Check if we can modify the set source in the original insn.  */
      if (validate_change (insn, &SET_SRC (set), reg, 0))
        new_insn = emit_insn_before (new_insn, insn);
      else
        new_insn = emit_insn_after (new_insn, insn);
    }

  gcse_create_count++;

  if (dump_file)
    fprintf (dump_file,
             "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
             BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
             INSN_UID (insn), regno);
}
/* Copy available expressions that reach the redundant expression
   to `reaching_reg'.  */

static void
pre_insert_copies (void)
{
  unsigned int i, added_copy;
  struct expr *expr;
  struct occr *occr;
  struct occr *avail;

  /* For each available expression in the table, copy the result to
     `reaching_reg' if the expression reaches a deleted one.

     ??? The current algorithm is rather brute force.
     Need to do some profiling.  */

  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
      {
        /* If the basic block isn't reachable, PPOUT will be TRUE.  However,
           we don't want to insert a copy here because the expression may not
           really be redundant.  So only insert an insn if the expression was
           deleted.  This test also avoids further processing if the
           expression wasn't deleted anywhere.  */
        if (expr->reaching_reg == NULL)
          continue;

        /* Set when we add a copy for that expression.  */
        added_copy = 0;

        for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
          {
            if (! occr->deleted_p)
              continue;

            for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
              {
                rtx insn = avail->insn;

                /* No need to handle this one if handled already.  */
                if (avail->copied_p)
                  continue;

                /* Don't handle this one if it's a redundant one.  */
                if (INSN_DELETED_P (insn))
                  continue;

                /* Or if the expression doesn't reach the deleted one.  */
                if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
                                               expr,
                                               BLOCK_FOR_INSN (occr->insn)))
                  continue;

                added_copy = 1;

                /* Copy the result of avail to reaching_reg.  */
                pre_insert_copy_insn (expr, insn);
                avail->copied_p = 1;
              }
          }

        if (added_copy)
          update_ld_motion_stores (expr);
      }
}
/* Emit move from SRC to DEST noting the equivalence with expression computed
   in INSN.  */

static rtx
gcse_emit_move_after (rtx src, rtx dest, rtx insn)
{
  rtx new_rtx;
  rtx set = single_set (insn), set2;
  rtx note;
  rtx eqv;

  /* This should never fail since we're creating a reg->reg copy
     we've verified to be valid.  */

  new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);

  /* Note the equivalence for local CSE pass.  */
  set2 = single_set (new_rtx);
  if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
    return new_rtx;
  if ((note = find_reg_equal_equiv_note (insn)))
    eqv = XEXP (note, 0);
  else
    eqv = SET_SRC (set);

  set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));

  return new_rtx;
}
/* Delete redundant computations.
   Deletion is done by changing the insn to copy the `reaching_reg' of
   the expression into the result of the SET.  It is left to later passes
   (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.

   Returns nonzero if a change is made.  */

static int
pre_delete (void)
{
  unsigned int i;
  int changed;
  struct expr *expr;
  struct occr *occr;

  changed = 0;
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i];
         expr != NULL;
         expr = expr->next_same_hash)
      {
        int indx = expr->bitmap_index;

        /* We only need to search antic_occr since we require
           ANTLOC != 0.  */

        for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
          {
            rtx insn = occr->insn;
            rtx set;
            basic_block bb = BLOCK_FOR_INSN (insn);

            /* We only delete insns that have a single_set.  */
            if (TEST_BIT (pre_delete_map[bb->index], indx)
                && (set = single_set (insn)) != 0
                && dbg_cnt (pre_insn))
              {
                /* Create a pseudo-reg to store the result of reaching
                   expressions into.  Get the mode for the new pseudo from
                   the mode of the original destination pseudo.  */
                if (expr->reaching_reg == NULL)
                  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));

                gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
                delete_insn (insn);
                occr->deleted_p = 1;
                changed = 1;
                gcse_subst_count++;

                if (dump_file)
                  {
                    fprintf (dump_file,
                             "PRE: redundant insn %d (expression %d) in ",
                             INSN_UID (insn), indx);
                    fprintf (dump_file, "bb %d, reaching reg is %d\n",
                             bb->index, REGNO (expr->reaching_reg));
                  }
              }
          }
      }

  return changed;
}
/* Perform GCSE optimizations using PRE.
   This is called by one_pre_gcse_pass after all the dataflow analysis
   has been done.

   This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
   lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
   Compiler Design and Implementation.

   ??? A new pseudo reg is created to hold the reaching expression.  The nice
   thing about the classical approach is that it would try to use an existing
   reg.  If the register can't be adequately optimized [i.e. we introduce
   reload problems], one could add a pass here to propagate the new register
   through the block.

   ??? We don't handle single sets in PARALLELs because we're [currently] not
   able to copy the rest of the parallel when we insert copies to create full
   redundancies from partial redundancies.  However, there's no reason why we
   can't handle PARALLELs in the cases where there are no partial
   redundancies.  */

static int
pre_gcse (void)
{
  unsigned int i;
  int did_insert, changed;
  struct expr **index_map;
  struct expr *expr;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Delete the redundant insns first so that
     - we know what register to use for the new insns and for the other
       ones with reaching expressions
     - we know which insns are redundant when we go to create copies  */

  changed = pre_delete ();
  did_insert = pre_edge_insert (edge_list, index_map);

  /* In other places with reaching expressions, copy the expression to the
     specially allocated pseudo-reg that reaches the redundant expr.  */
  pre_insert_copies ();
  if (did_insert)
    {
      commit_edge_insertions ();
      changed = 1;
    }

  free (index_map);
  return changed;
}
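/* Source-level sketch of the overall effect (illustrative):

     if (p)  x = a + b;            if (p)  { t = a + b; x = t; }
     else    ...;             =>   else    { t = a + b; }
     y = a + b;                    y = t;

   pre_delete rewrites the fully redundant evaluation of a + b into a
   copy from the new pseudo t (the reaching_reg), pre_edge_insert adds
   the missing evaluation on the else path, and pre_insert_copies links
   the remaining original computation to t.  */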
/* Top level routine to perform one PRE GCSE pass.

   Return nonzero if a change was made.  */

static int
one_pre_gcse_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("PRE disabled")))
    return 0;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table, 0);
  add_noreturn_fake_exit_edges ();
  if (flag_gcse_lm)
    compute_ld_motion_mems ();

  compute_hash_table (&expr_hash_table);
  trim_ld_motion_mems ();
  if (dump_file)
    dump_hash_table (dump_file, "Expression", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
      compute_pre_data ();
      changed |= pre_gcse ();
      free_edge_list (edge_list);
      free_pre_mem ();
    }

  free_ld_motion_mems ();
  remove_fake_exit_edges ();
  free_hash_table (&expr_hash_table);

  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  return changed;
}
/* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
   to INSN.  If such notes are added to an insn which references a
   CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
   that note, because the following loop optimization pass requires
   them.  */

/* ??? If there was a jump optimization pass after gcse and before loop,
   then we would not need to do this here, because jump would add the
   necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */

static void
add_label_notes (rtx x, rtx insn)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
         avoid flow generating (slightly) worse code.

         We no longer ignore such label references (see LABEL_REF handling in
         mark_jump_label for additional information).  */

      /* There's no reason for current users to emit jump-insns with
         such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
         notes.  */
      gcc_assert (!JUMP_P (insn));
      add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));

      if (LABEL_P (XEXP (x, 0)))
        LABEL_NUSES (XEXP (x, 0))++;

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insn);
    }
}
/* Code Hoisting variables and subroutines.  */

/* Very busy expressions.  */
static sbitmap *hoist_vbein;
static sbitmap *hoist_vbeout;

/* ??? We could compute post dominators and run this algorithm in
   reverse to perform tail merging; doing so would probably be
   more effective than the tail merging code in jump.c.

   It's unclear if tail merging could be run in parallel with
   code hoisting.  It would be nice.  */
/* Allocate vars used for code hoisting analysis.  */

static void
alloc_code_hoist_mem (int n_blocks, int n_exprs)
{
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);

  hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
  hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
}
/* Free vars used for code hoisting analysis.  */

static void
free_code_hoist_mem (void)
{
  sbitmap_vector_free (antloc);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  sbitmap_vector_free (hoist_vbein);
  sbitmap_vector_free (hoist_vbeout);

  free_dominance_info (CDI_DOMINATORS);
}
/* Compute the very busy expressions at entry/exit from each block.

   An expression is very busy if all paths from a given point
   compute the expression.  */
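/* For example (a hypothetical input): after the test in

	    if (c)
	      x = a + b;
	    else
	      y = a + b;

   the expression a + b is computed on every outgoing path, so it is
   very busy at the end of the block containing the test.  */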
static void
compute_code_hoist_vbeinout (void)
{
  int changed, passes;
  basic_block bb;

  sbitmap_vector_zero (hoist_vbeout, last_basic_block);
  sbitmap_vector_zero (hoist_vbein, last_basic_block);

  passes = 0;
  changed = 1;

  while (changed)
    {
      changed = 0;

      /* We scan the blocks in the reverse order to speed up
         the convergence.  */
      FOR_EACH_BB_REVERSE (bb)
        {
          if (bb->next_bb != EXIT_BLOCK_PTR)
            {
              sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
                                             hoist_vbein, bb->index);

              /* Include expressions in VBEout that are calculated
                 in BB and available at its end.  */
              sbitmap_a_or_b (hoist_vbeout[bb->index],
                              hoist_vbeout[bb->index], comp[bb->index]);
            }

          changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
                                              antloc[bb->index],
                                              hoist_vbeout[bb->index],
                                              transp[bb->index]);
        }

      passes++;
    }

  if (dump_file)
    {
      fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);

      FOR_EACH_BB (bb)
        {
          fprintf (dump_file, "vbein (%d): ", bb->index);
          dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
          fprintf (dump_file, "vbeout(%d): ", bb->index);
          dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
        }
    }
}
/* Top level routine to do the dataflow analysis needed by code hoisting.  */

static void
compute_code_hoist_data (void)
{
  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (false);
  compute_code_hoist_vbeinout ();
  calculate_dominance_info (CDI_DOMINATORS);
  if (dump_file)
    fprintf (dump_file, "\n");
}
/* Determine if the expression identified by EXPR_INDEX would
   reach BB unimpaired if it was placed at the end of EXPR_BB.
   Stop the search if the expression would need to be moved more
   than DISTANCE instructions.

   It's unclear exactly what Muchnick meant by "unimpaired".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   paths.  */
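/* For instance (illustration only): if some block on a path from EXPR_BB
   to BB assigns to one of the expression's operands, the expression is
   not transparent in that block, so it would not reach BB unimpaired and
   must not be hoisted to the end of EXPR_BB.  */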
static int
hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb,
                           char *visited, int distance, int *bb_size)
{
  edge pred;
  edge_iterator ei;
  int visited_allocated_locally = 0;

  /* Terminate the search if distance, for which EXPR is allowed to move,
     is exhausted.  */
  if (distance > 0)
    {
      distance -= bb_size[bb->index];

      if (distance <= 0)
        return 0;
    }
  else
    gcc_assert (distance == 0);

  if (visited == NULL)
    {
      visited_allocated_locally = 1;
      visited = XCNEWVEC (char, last_basic_block);
    }

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR)
        break;
      else if (pred_bb == expr_bb)
        continue;
      else if (visited[pred_bb->index])
        continue;

      /* Does this predecessor generate this expression?  */
      else if (! TEST_BIT (transp[pred_bb->index], expr_index))
        break;

      /* Not killed.  */
      else
        {
          visited[pred_bb->index] = 1;
          if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb,
                                           visited, distance, bb_size))
            break;
        }
    }
  if (visited_allocated_locally)
    free (visited);

  return (pred == NULL);
}
/* Find occurrence in BB.  */

static struct occr *
find_occr_in_bb (struct occr *occr, basic_block bb)
{
  /* Find the right occurrence of this expression.  */
  while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
    occr = occr->next;

  return occr;
}
/* Actually perform code hoisting.  */

static int
hoist_code (void)
{
  basic_block bb, dominated;
  VEC (basic_block, heap) *dom_tree_walk;
  unsigned int dom_tree_walk_index;
  VEC (basic_block, heap) *domby;
  unsigned int i, j;
  struct expr **index_map;
  struct expr *expr;
  int *to_bb_head;
  int *bb_size;
  int changed = 0;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Calculate sizes of basic blocks and note how far
     each instruction is from the start of its block.  We then use this
     data to restrict distance an expression can travel.  */

  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block);

  FOR_EACH_BB (bb)
    {
      rtx insn;
      int to_head;

      to_head = 0;
      FOR_BB_INSNS (bb, insn)
        {
          /* Don't count debug instructions to avoid them affecting
             decision choices.  */
          if (NONDEBUG_INSN_P (insn))
            to_bb_head[INSN_UID (insn)] = to_head++;
        }

      bb_size[bb->index] = to_head;
    }

  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
              && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
                  == ENTRY_BLOCK_PTR->next_bb));

  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
                                            ENTRY_BLOCK_PTR->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions, nothing gets hoisted from the entry block.  */
  FOR_EACH_VEC_ELT (basic_block, dom_tree_walk, dom_tree_walk_index, bb)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);

      if (VEC_length (basic_block, domby) == 0)
        continue;

      /* Examine each expression that is very busy at the exit of this
         block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
        {
          if (TEST_BIT (hoist_vbeout[bb->index], i))
            {
              /* Current expression.  */
              struct expr *expr = index_map[i];
              /* Number of occurrences of EXPR that can be hoisted to BB.  */
              int hoistable = 0;
              /* Basic blocks that have occurrences reachable from BB.  */
              bitmap_head _from_bbs, *from_bbs = &_from_bbs;
              /* Occurrences reachable from BB.  */
              VEC (occr_t, heap) *occrs_to_hoist = NULL;
              /* We want to insert the expression into BB only once, so
                 note when we've inserted it.  */
              int insn_inserted_p;
              occr_t occr;

              bitmap_initialize (from_bbs, 0);

              /* If an expression is computed in BB and is available at end of
                 BB, hoist all occurrences dominated by BB to BB.  */
              if (TEST_BIT (comp[bb->index], i))
                {
                  occr = find_occr_in_bb (expr->antic_occr, bb);

                  if (occr)
                    {
                      /* An occurrence might've been already deleted
                         while processing a dominator of BB.  */
                      if (occr->deleted_p)
                        gcc_assert (MAX_HOIST_DEPTH > 1);
                      else
                        {
                          gcc_assert (NONDEBUG_INSN_P (occr->insn));
                          hoistable++;
                        }
                    }
                  else
                    hoistable++;
                }

              /* We've found a potentially hoistable expression, now
                 we look at every block BB dominates to see if it
                 computes the expression.  */
              FOR_EACH_VEC_ELT (basic_block, domby, j, dominated)
                {
                  int max_distance;

                  /* Ignore self dominance.  */
                  if (bb == dominated)
                    continue;
                  /* We've found a dominated block, now see if it computes
                     the busy expression and whether or not moving that
                     expression to the "beginning" of that block is safe.  */
                  if (!TEST_BIT (antloc[dominated->index], i))
                    continue;

                  occr = find_occr_in_bb (expr->antic_occr, dominated);
                  gcc_assert (occr);

                  /* An occurrence might've been already deleted
                     while processing a dominator of BB.  */
                  if (occr->deleted_p)
                    {
                      gcc_assert (MAX_HOIST_DEPTH > 1);
                      continue;
                    }
                  gcc_assert (NONDEBUG_INSN_P (occr->insn));

                  max_distance = expr->max_distance;
                  if (max_distance > 0)
                    /* Adjust MAX_DISTANCE to account for the fact that
                       OCCR won't have to travel all of DOMINATED, but
                       only part of it.  */
                    max_distance += (bb_size[dominated->index]
                                     - to_bb_head[INSN_UID (occr->insn)]);

                  /* Note if the expression would reach the dominated block
                     unimpaired if it was placed at the end of BB.

                     Keep track of how many times this expression is hoistable
                     from a dominated block into BB.  */
                  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL,
                                                 max_distance, bb_size))
                    {
                      hoistable++;
                      VEC_safe_push (occr_t, heap,
                                     occrs_to_hoist, occr);
                      bitmap_set_bit (from_bbs, dominated->index);
                    }
                }

              /* If we found more than one hoistable occurrence of this
                 expression, then note it in the vector of expressions to
                 hoist.  It makes no sense to hoist things which are computed
                 in only one BB, and doing so tends to pessimize register
                 allocation.  One could increase this value to try harder
                 to avoid any possible code expansion due to register
                 allocation issues; however experiments have shown that
                 the vast majority of hoistable expressions are only movable
                 from two successors, so raising this threshold is likely
                 to nullify any benefit we get from code hoisting.  */
              if (hoistable > 1 && dbg_cnt (hoist_insn))
                {
                  /* If (hoistable != VEC_length), then there is
                     an occurrence of EXPR in BB itself.  Don't waste
                     time looking for LCA in this case.  */
                  if ((unsigned) hoistable
                      == VEC_length (occr_t, occrs_to_hoist))
                    {
                      basic_block lca;

                      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
                                                              from_bbs);
                      if (lca != bb)
                        /* Punt, it's better to hoist these occurrences to
                           LCA.  */
                        VEC_free (occr_t, heap, occrs_to_hoist);
                    }
                }
              else
                /* Punt, no point hoisting a single occurrence.  */
                VEC_free (occr_t, heap, occrs_to_hoist);

              insn_inserted_p = 0;

              /* Walk through occurrences of I'th expressions we want
                 to hoist to BB and make the transformations.  */
              FOR_EACH_VEC_ELT (occr_t, occrs_to_hoist, j, occr)
                {
                  rtx insn;
                  rtx set;

                  gcc_assert (!occr->deleted_p);

                  insn = occr->insn;
                  set = single_set (insn);
                  gcc_assert (set);

                  /* Create a pseudo-reg to store the result of reaching
                     expressions into.  Get the mode for the new pseudo
                     from the mode of the original destination pseudo.

                     It is important to use new pseudos whenever we
                     emit a set.  This will allow reload to use
                     rematerialization for such registers.  */
                  if (!insn_inserted_p)
                    expr->reaching_reg
                      = gen_reg_rtx_and_attrs (SET_DEST (set));

                  gcse_emit_move_after (expr->reaching_reg, SET_DEST (set),
                                        insn);
                  delete_insn (insn);
                  occr->deleted_p = 1;
                  changed = 1;
                  gcse_subst_count++;

                  if (!insn_inserted_p)
                    {
                      insert_insn_end_basic_block (expr, bb);
                      insn_inserted_p = 1;
                    }
                }

              VEC_free (occr_t, heap, occrs_to_hoist);
              bitmap_clear (from_bbs);
            }
        }
      VEC_free (basic_block, heap, domby);
    }

  VEC_free (basic_block, heap, dom_tree_walk);
  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}
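/* Schematically (illustration only, with REG standing for the new
   pseudo created above), hoisting rewrites

	    if (c) x = a + b; else y = a + b;

   into

	    REG = a + b;
	    if (c) x = REG; else y = REG;

   with the single computation inserted at the end of the dominating
   block.  */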
/* Top level routine to perform one code hoisting (aka unification) pass

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table, 0);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}
/* Here we provide the things required to do store motion towards
   the exit.  In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo(float scale)
	    {
	      for (i=0; i<10; i++)
		a[i] *= scale;
	    }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

     The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

     Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
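/* Schematically (illustration only, with REG standing for the reaching
   register): once the load of 'i' has been moved, every read and write
   of 'i' inside the loop uses REG, a single store

	    i = REG;

   is placed on the loop exit, and no loads or stores of 'i' remain in
   the loop body.  */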
static hashval_t
pre_ldst_expr_hash (const void *p)
{
  int do_not_record_p = 0;
  const struct ls_expr *const x = (const struct ls_expr *) p;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int
pre_ldst_expr_eq (const void *p1, const void *p2)
{
  const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
    *const ptr2 = (const struct ls_expr *) p2;
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}
/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  void **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
                   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
  if (*slot)
    return (struct ls_expr *) *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next         = pre_ldst_mems;
  ptr->expr         = NULL;
  ptr->pattern      = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads        = NULL_RTX;
  ptr->stores       = NULL_RTX;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid      = 0;
  ptr->index        = 0;
  ptr->hash_index   = hash;
  pre_ldst_mems     = ptr;
  *slot = ptr;

  return ptr;
}
/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ldst_mems (void)
{
  if (pre_ldst_table)
    htab_delete (pre_ldst_table);
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}
/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	 Loads : ");

      if (ptr->loads)
        print_rtl (file, ptr->loads);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n	Stores : ");

      if (ptr->stores)
        print_rtl (file, ptr->stores);
      else
        fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}
/* Return the entry from the list of ldst only expressions whose pattern
   matches X, or NULL if there is no valid entry for X.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  void **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
  if (!slot || ((struct ls_expr *)*slot)->invalid)
    return NULL;
  return (struct ls_expr *) *slot;
}

/* Return first item in the list.  */

static inline struct ls_expr *
first_ls_expr (void)
{
  return pre_ldst_mems;
}

/* Return the next item in the list after the specified one.  */

static inline struct ls_expr *
next_ls_expr (struct ls_expr * ptr)
{
  return ptr->next;
}
/* Load Motion for loads which only kill themselves.  */

/* Return true if x is a simple MEM operation, with no registers or
   side effects.  These are the types of loads we consider for the
   ld_motion list, otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (! MEM_P (x))
    return 0;

  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
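/* For example (illustrative): a load from a global scalar such as
   (mem:SI (symbol_ref ("x"))) passes the checks above, whereas a
   volatile reference, a BLKmode (e.g. whole-struct) reference, or a
   reference that mentions the stack pointer, such as an incoming
   argument slot, is rejected.  */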
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias themselves.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */
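/* For example (schematic RTL): in

	    (set (reg:SI r) (plus:SI (mem:SI X) (const_int 1)))

   the MEM is buried inside the PLUS rather than being the whole
   SET_SRC, so the ls_expr entry for X must be marked invalid.  */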
static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
                                pre_ldst_expr_eq, NULL);

  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (NONDEBUG_INSN_P (insn))
            {
              if (GET_CODE (PATTERN (insn)) == SET)
                {
                  rtx src = SET_SRC (PATTERN (insn));
                  rtx dest = SET_DEST (PATTERN (insn));

                  /* Check for a simple LOAD...  */
                  if (MEM_P (src) && simple_mem (src))
                    {
                      ptr = ldst_entry (src);
                      if (REG_P (dest))
                        ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
                      else
                        ptr->invalid = 1;
                    }
                  else
                    {
                      /* Make sure there isn't a buried load somewhere.  */
                      invalidate_any_buried_refs (src);
                    }

                  /* Check for stores.  Don't worry about aliased ones, they
                     will block any movement we might do later.  We only care
                     about this exact pattern since those are the only
                     circumstance that we will ignore the aliasing info.  */
                  if (MEM_P (dest) && simple_mem (dest))
                    {
                      ptr = ldst_entry (dest);

                      if (! MEM_P (src)
                          && GET_CODE (src) != ASM_OPERANDS
                          /* Check for REG manually since want_to_gcse_p
                             returns 0 for all REGs.  */
                          && can_assign_to_reg_without_clobbers_p (src))
                        ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
                      else
                        ptr->invalid = 1;
                    }
                }
              else
                invalidate_any_buried_refs (PATTERN (insn));
            }
        }
    }
}
/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
        {
          /* Delete if we cannot find this mem in the expression list.  */
          unsigned int hash = ptr->hash_index % expr_hash_table.size;

          for (expr = expr_hash_table.table[hash];
               expr != NULL;
               expr = expr->next_same_hash)
            if (expr_equiv_p (expr->expr, ptr->pattern))
              break;
        }
      else
        expr = (struct expr *) 0;

      if (expr)
        {
          /* Set the expression field if we are keeping it.  */
          ptr->expr = expr;
          last = & ptr->next;
          ptr = ptr->next;
        }
      else
        {
          *last = ptr->next;
          htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
          free_ldst_entry (ptr);
          ptr = * last;
        }
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}
/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */
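/* Schematically (with REACHING_REG standing for expr->reaching_reg):

	    (set (mem) (expr))

   becomes

	    (set (reg REACHING_REG) (expr))
	    (set (mem) (reg REACHING_REG))

   so a load of the same location that was moved still finds the stored
   value in the reaching register.  */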
static void
update_ld_motion_stores (struct expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
         matter to set the reaching reg everywhere...  some might be
         dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
         where reg is the reaching reg used in the load.  We checked in
         compute_ld_motion_mems that we can replace (set mem expr) with
         (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
        {
          rtx insn = XEXP (list, 0);
          rtx pat = PATTERN (insn);
          rtx src = SET_SRC (pat);
          rtx reg = expr->reaching_reg;
          rtx copy;

          /* If we've already copied it, continue.  */
          if (expr->reaching_reg == src)
            continue;

          if (dump_file)
            {
              fprintf (dump_file, "PRE:  store updated with reaching reg ");
              print_rtl (dump_file, expr->reaching_reg);
              fprintf (dump_file, ":\n	");
              print_inline_rtx (dump_file, insn, 8);
              fprintf (dump_file, "\n");
            }

          copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
          emit_insn_before (copy, insn);
          SET_SRC (pat) = reg;
          df_insn_rescan (insn);

          /* Un-recognize this pattern since it's probably different now.  */
          INSN_CODE (insn) = -1;
          gcse_create_count++;
        }
    }
}
/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple switch statements.  Rather than simply
     threshold the number of blocks, use something with a more
     graceful degradation.  */
  if (n_edges > 20000 + n_basic_blocks * 4)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d edges/basic block",
               pass, n_basic_blocks, n_edges / n_basic_blocks);

      return true;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers",
               pass, n_basic_blocks, max_reg_num ());

      return true;
    }

  return false;
}
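/* For example (illustrative arithmetic): a function with 1000 basic
   blocks may have up to 20000 + 1000 * 4 = 24000 edges before the first
   check above fires; a normal cfg with roughly two edges per block
   stays far below that limit.  */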
/* Main function for the CPROP pass.  */

static int
one_cprop_pass (void)
{
  int changed = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("const/copy propagation disabled")))
    return 0;

  global_const_prop_count = local_const_prop_count = 0;
  global_copy_prop_count = local_copy_prop_count = 0;

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  /* Do a local const/copy propagation pass first.  The global pass
     only handles global opportunities.
     If the local pass changes something, remove any unreachable blocks
     because the CPROP global dataflow analysis may get into infinite
     loops for CFGs with unreachable blocks.

     FIXME: This local pass should not be necessary after CSE (but for
            some reason it still is).  It is also (proven) not necessary
            to run the local pass right after FWPROP.

     FIXME: The global analysis would not get into infinite loops if it
            would use the DF solver (via df_simple_dataflow) instead of
            the solver implemented in this file.  */
  if (local_cprop_pass ())
    {
      delete_unreachable_blocks ();
      df_analyze ();
    }

  /* Determine implicit sets.  */
  implicit_sets = XCNEWVEC (rtx, last_basic_block);
  find_implicit_sets ();

  alloc_hash_table (&set_hash_table, 1);
  compute_hash_table (&set_hash_table);

  /* Free implicit_sets before peak usage.  */
  free (implicit_sets);
  implicit_sets = NULL;

  if (dump_file)
    dump_hash_table (dump_file, "SET", &set_hash_table);
  if (set_hash_table.n_elems > 0)
    {
      basic_block bb;
      rtx insn;

      alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
      compute_cprop_data ();

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR,
                      next_bb)
        {
          /* Reset tables used to keep track of what's still valid [since
             the start of the block].  */
          reset_opr_set_tables ();

          FOR_BB_INSNS (bb, insn)
            if (INSN_P (insn))
              {
                changed |= cprop_insn (insn);

                /* Keep track of everything modified by this insn.  */
                /* ??? Need to be careful w.r.t. mods done to INSN.
                       Don't call mark_oprs_set if we turned the
                       insn into a NOTE.  */
                if (! NOTE_P (insn))
                  mark_oprs_set (insn);
              }
        }

      changed |= bypass_conditional_jumps ();
      free_cprop_mem ();
    }

  free_hash_table (&set_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  if (dump_file)
    {
      fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d local const props, %d local copy props, ",
               local_const_prop_count, local_copy_prop_count);
      fprintf (dump_file, "%d global const props, %d global copy props\n\n",
               global_const_prop_count, global_copy_prop_count);
    }

  return changed;
}
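/* As an illustration (a hypothetical input): given

	    x = 7;
	    y = x;
	    z = y + 1;

   copy propagation replaces the use of y with x, and constant
   propagation then substitutes 7 and folds z = 8.  The local pass
   catches such straight-line cases within a single block; the global
   pass propagates across basic block boundaries.  */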
/* All the passes implemented in this file.  Each pass has its
   own gate and execute function, and at the end of the file a
   pass definition for passes.c.

   We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

static bool
gate_rtl_cprop (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && dbg_cnt (cprop);
}

static unsigned int
execute_rtl_cprop (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_set_flags (DF_LR_RUN_DCE);
  df_analyze ();
  changed = one_cprop_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static bool
gate_rtl_pre (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && optimize_function_for_speed_p (cfun)
    && dbg_cnt (pre);
}

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static bool
gate_rtl_hoist (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}
struct rtl_opt_pass pass_rtl_cprop =
{
 {
  RTL_PASS,
  "cprop",                              /* name */
  gate_rtl_cprop,                       /* gate */
  execute_rtl_cprop,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CPROP,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_pre =
{
 {
  RTL_PASS,
  "rtl pre",                            /* name */
  gate_rtl_pre,                         /* gate */
  execute_rtl_pre,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_PRE,                               /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_hoist =
{
 {
  RTL_PASS,
  "hoist",                              /* name */
  gate_rtl_hoist,                       /* gate */
  execute_rtl_hoist,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_HOIST,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

#include "gt-gcse.h"