1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* TODO
23 - reordering of memory allocation and freeing to be more space efficient
24 - do rough calc of how many regs are needed in each block, and a rough
25 calc of how many regs are available in each class and use that to
26 throttle back the code in cases where RTX_COST is minimal.
27 - a store to the same address as a load does not kill the load if the
28 source of the store is also the destination of the load. Handling this
29 allows more load motion, particularly out of loops.
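   As a hedged illustration of that last item (hypothetical code, not
   taken from this pass):

       r1 = *p;          <- load
       ...
       *p = r1;          <- store of the loaded value back to *p

   The store writes back exactly the value the load produced, so a later
   use of *p is still redundant; treating the store as a kill needlessly
   blocks hoisting the load out of a loop.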
33 /* References searched while implementing this.
35 Compilers Principles, Techniques and Tools
36 Aho, Sethi, Ullman
37 Addison-Wesley, 1988
39 Global Optimization by Suppression of Partial Redundancies
40 E. Morel, C. Renvoise
41 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
43 A Portable Machine-Independent Global Optimizer - Design and Measurements
44 Frederick Chow
45 Stanford Ph.D. thesis, Dec. 1983
47 A Fast Algorithm for Code Movement Optimization
48 D.M. Dhamdhere
49 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
51 A Solution to a Problem with Morel and Renvoise's
52 Global Optimization by Suppression of Partial Redundancies
53 K-H Drechsler, M.P. Stadel
54 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
56 Practical Adaptation of the Global Optimization
57 Algorithm of Morel and Renvoise
58 D.M. Dhamdhere
59 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
61 Efficiently Computing Static Single Assignment Form and the Control
62 Dependence Graph
63 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
64 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
66 Lazy Code Motion
67 J. Knoop, O. Ruthing, B. Steffen
68 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
70 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
71 Time for Reducible Flow Control
72 Thomas Ball
73 ACM Letters on Programming Languages and Systems,
74 Vol. 2, Num. 1-4, Mar-Dec 1993
76 An Efficient Representation for Sparse Sets
77 Preston Briggs, Linda Torczon
78 ACM Letters on Programming Languages and Systems,
79 Vol. 2, Num. 1-4, Mar-Dec 1993
81 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
82 K-H Drechsler, M.P. Stadel
83 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
85 Partial Dead Code Elimination
86 J. Knoop, O. Ruthing, B. Steffen
87 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89 Effective Partial Redundancy Elimination
90 P. Briggs, K.D. Cooper
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
93 The Program Structure Tree: Computing Control Regions in Linear Time
94 R. Johnson, D. Pearson, K. Pingali
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
97 Optimal Code Motion: Theory and Practice
98 J. Knoop, O. Ruthing, B. Steffen
99 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
101 The power of assignment motion
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
105 Global code motion / global value numbering
106 C. Click
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
109 Value Driven Redundancy Elimination
110 L.T. Simpson
111 Rice University Ph.D. thesis, Apr. 1996
113 Value Numbering
114 L.T. Simpson
115 Massively Scalar Compiler Project, Rice University, Sep. 1996
117 High Performance Compilers for Parallel Computing
118 Michael Wolfe
119 Addison-Wesley, 1996
121 Advanced Compiler Design and Implementation
122 Steven Muchnick
123 Morgan Kaufmann, 1997
125 Building an Optimizing Compiler
126 Robert Morgan
127 Digital Press, 1998
129 People wishing to speed up the code here should read:
130 Elimination Algorithms for Data Flow Analysis
131 B.G. Ryder, M.C. Paull
132 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
134 How to Analyze Large Programs Efficiently and Informatively
135 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
136 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
138 People wishing to do something different can find various possibilities
139 in the above papers and elsewhere.
142 #include "config.h"
143 #include "system.h"
144 #include "coretypes.h"
145 #include "tm.h"
146 #include "toplev.h"
148 #include "rtl.h"
149 #include "tree.h"
150 #include "tm_p.h"
151 #include "regs.h"
152 #include "hard-reg-set.h"
153 #include "flags.h"
154 #include "real.h"
155 #include "insn-config.h"
156 #include "recog.h"
157 #include "basic-block.h"
158 #include "output.h"
159 #include "function.h"
160 #include "expr.h"
161 #include "except.h"
162 #include "ggc.h"
163 #include "params.h"
164 #include "cselib.h"
165 #include "intl.h"
166 #include "obstack.h"
167 #include "timevar.h"
168 #include "tree-pass.h"
169 #include "hashtab.h"
170 #include "df.h"
171 #include "dbgcnt.h"
172 #include "target.h"
174 /* Propagate flow information through back edges and thus enable PRE's
175 moving loop invariant calculations out of loops.
177 Originally this tended to create worse overall code, but several
178 improvements during the development of PRE seem to have made following
179 back edges generally a win.
181 Note much of the loop invariant code motion done here would normally
182 be done by loop.c, which has more heuristics for when to move invariants
183 out of loops. At some point we might need to move some of those
184 heuristics into gcse.c. */
186 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
187 are a superset of those done by classic GCSE.
189 We perform the following steps:
191 1) Compute table of places where registers are set.
193 2) Perform copy/constant propagation.
195 3) Perform global cse using lazy code motion if not optimizing
196 for size, or code hoisting if we are.
198 4) Perform another pass of copy/constant propagation. Try to bypass
199 conditional jumps if the condition can be computed from a value of
200 an incoming edge.
202 5) Perform store motion.
204 Two passes of copy/constant propagation are done because the first one
205 enables more GCSE and the second one helps to clean up the copies that
206 GCSE creates. This is needed more for PRE than for Classic because Classic
207 GCSE will try to use an existing register containing the common
208 subexpression rather than create a new one. This is harder to do for PRE
209 because of the code motion (which Classic GCSE doesn't do).
211 Expressions we are interested in GCSE-ing are of the form
212 (set (pseudo-reg) (expression)).
213 Function want_to_gcse_p says what these are.
215 In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
216 This allows PRE to hoist expressions that are expressed in multiple insns,
217 such as complex address calculations (e.g. for PIC code, or loads with a
218 high part and a low part).
220 PRE handles moving invariant expressions out of loops (by treating them as
221 partially redundant).
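
   As a hedged illustration (hypothetical source, not from this file),
   a loop-invariant expression

       for (i = 0; i < n; i++)
         x[i] = a + b;          <- a + b is recomputed on every iteration

   is partially redundant along the loop back edge, so PRE inserts the
   computation on the entry edge and reuses it in the body:

       t = a + b;               <- inserted by PRE
       for (i = 0; i < n; i++)
         x[i] = t;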
223 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
224 assignment) based GVN (global value numbering). L. T. Simpson's paper
225 (Rice University) on value numbering is a useful reference for this.
227 **********************
229 We used to support multiple passes but there are diminishing returns in
230 doing so. The first pass usually makes 90% of the changes that are doable.
231 A second pass can make a few more changes made possible by the first pass.
232 Experiments show any further passes don't make enough changes to justify
233 the expense.
235 A study of spec92 using an unlimited number of passes:
236 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
237 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
238 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
240 It was found that doing copy propagation between each pass enables further
241 substitutions.
243 This study was done before expressions in REG_EQUAL notes were added as
244 candidate expressions for optimization, and before the GIMPLE optimizers
245 were added. Probably, running multiple passes is even less efficient now than
246 at the time when the study was conducted.
248 PRE is quite expensive in complicated functions because the DFA can take
249 a while to converge. Hence we only perform one pass.
251 **********************
253 The steps for PRE are:
255 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
257 2) Perform the data flow analysis for PRE.
259 3) Delete the redundant instructions
261 4) Insert the required copies [if any] that make the partially
262 redundant instructions fully redundant.
264 5) For other reaching expressions, insert an instruction to copy the value
265 to a newly created pseudo that will reach the redundant instruction.
267 The deletion is done first so that when we do insertions we
268 know which pseudo reg to use.
270 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
271 argue it is not. The number of iterations for the algorithm to converge
272 is typically 2-4 so I don't view it as that expensive (relatively speaking).
274 PRE GCSE depends heavily on the second CSE pass to clean up the copies
275 we create. To make an expression reach the place where it's redundant,
276 the result of the expression is copied to a new register, and the redundant
277 expression is deleted by replacing it with this new register. Classic GCSE
278 doesn't have this problem as much, because it computes the reaching defs of
279 each register in each block and thus can try to use an existing
280 register. */
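/* A minimal sketch of the deletion/insertion scheme above on a diamond
   CFG (hypothetical pseudo-RTL, for illustration only):

       B1: r3 = r1 + r2        B2: (no computation)
                \               /
                 v             v
              B3: r4 = r1 + r2          <- partially redundant

   The occurrence in B3 is deleted and replaced by a copy from a new
   pseudo, say r9; then "r9 = r1 + r2" is inserted on the edge from B2
   and "r9 = r3" after the occurrence in B1, so the value reaches B3 in
   r9 along every path.  */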
282 /* GCSE global vars. */
284 /* Set to non-zero if CSE should run after all GCSE optimizations are done. */
285 int flag_rerun_cse_after_global_opts;
287 /* An obstack for our working variables. */
288 static struct obstack gcse_obstack;
290 struct reg_use { rtx reg_rtx; };
292 /* Hash table of expressions. */
294 struct expr
296 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
297 rtx expr;
298 /* Index in the available expression bitmaps. */
299 int bitmap_index;
300 /* Next entry with the same hash. */
301 struct expr *next_same_hash;
302 /* List of anticipatable occurrences in basic blocks in the function.
303 An "anticipatable occurrence" is one that is the first occurrence in the
304 basic block, the operands are not modified in the basic block prior
305 to the occurrence and the output is not used between the start of
306 the block and the occurrence. */
307 struct occr *antic_occr;
308 /* List of available occurrences in basic blocks in the function.
309 An "available occurrence" is one that is the last occurrence in the
310 basic block and the operands are not modified by following statements in
311 the basic block [including this insn]. */
312 struct occr *avail_occr;
313 /* Non-null if the computation is PRE redundant.
314 The value is the newly created pseudo-reg to record a copy of the
315 expression in all the places that reach the redundant copy. */
316 rtx reaching_reg;
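/* For illustration (a hypothetical block): in

       insn 1:  r5 = r1 + r2
       insn 2:  r1 = ...
       insn 3:  r6 = r1 + r2

   insn 1 is the anticipatable occurrence of r1 + r2 (first in the block,
   operands unmodified before it) and insn 3 is the available occurrence
   (last in the block, operands unmodified after it).  Without the
   redefinition in insn 2 the expression would also be transparent in
   the block.  */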
319 /* Occurrence of an expression.
320 There is one per basic block. If a pattern appears more than once, the
321 last appearance is used [or first for anticipatable expressions]. */
323 struct occr
325 /* Next occurrence of this expression. */
326 struct occr *next;
327 /* The insn that computes the expression. */
328 rtx insn;
329 /* Nonzero if this [anticipatable] occurrence has been deleted. */
330 char deleted_p;
331 /* Nonzero if this [available] occurrence has been copied to
332 reaching_reg. */
333 /* ??? This is mutually exclusive with deleted_p, so they could share
334 the same byte. */
335 char copied_p;
338 /* Expression and copy propagation hash tables.
339 Each hash table is an array of buckets.
340 ??? It is known that if it were an array of entries, structure elements
341 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
342 not clear whether in the final analysis a sufficient amount of memory would
343 be saved as the size of the available expression bitmaps would be larger
344 [one could build a mapping table without holes afterwards though].
345 Someday I'll perform the computation and figure it out. */
347 struct hash_table_d
349 /* The table itself.
350 This is an array of `expr_hash_table_size' elements. */
351 struct expr **table;
353 /* Size of the hash table, in elements. */
354 unsigned int size;
356 /* Number of hash table elements. */
357 unsigned int n_elems;
359 /* Whether the table is the expression or the copy propagation one. */
360 int set_p;
363 /* Expression hash table. */
364 static struct hash_table_d expr_hash_table;
366 /* Copy propagation hash table. */
367 static struct hash_table_d set_hash_table;
369 /* This is a list of expressions which are MEMs and will be used by load
370 or store motion.
371 Load motion tracks MEMs which aren't killed by
372 anything except themselves (i.e., loads and stores to a single location).
373 We can then allow movement of these MEM refs with a little special
374 allowance. (all stores copy the same value to the reaching reg used
375 for the loads). This means all values used to store into memory must have
376 no side effects so we can re-issue the setter value.
377 Store Motion uses this structure as an expression table to track stores
378 which look interesting, and might be moveable towards the exit block. */
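/* A small hedged example of what load motion tracks (hypothetical):
   if every reference to the location *p in the function is either
   "rX = *p" or "*p = rY", and no other store may alias p, then the
   loads and stores are collected in one ls_expr, and the loads can
   later be satisfied from the single reaching register that all the
   stores copy their value into.  */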
380 struct ls_expr
382 struct expr * expr; /* Gcse expression reference for LM. */
383 rtx pattern; /* Pattern of this mem. */
384 rtx pattern_regs; /* List of registers mentioned by the mem. */
385 rtx loads; /* INSN list of loads seen. */
386 rtx stores; /* INSN list of stores seen. */
387 struct ls_expr * next; /* Next in the list. */
388 int invalid; /* Invalid for some reason. */
389 int index; /* If it maps to a bitmap index. */
390 unsigned int hash_index; /* Index when in a hash table. */
391 rtx reaching_reg; /* Register to use when re-writing. */
394 /* Array of implicit set patterns indexed by basic block index. */
395 static rtx *implicit_sets;
397 /* Head of the list of load/store memory refs. */
398 static struct ls_expr * pre_ldst_mems = NULL;
400 /* Hashtable for the load/store memory refs. */
401 static htab_t pre_ldst_table = NULL;
403 /* Bitmap containing one bit for each register in the program.
404 Used when performing GCSE to track which registers have been set since
405 the start of the basic block. */
406 static regset reg_set_bitmap;
408 /* Array, indexed by basic block number, of lists of insns which modify
409 memory within that block. */
410 static rtx * modify_mem_list;
411 static bitmap modify_mem_list_set;
413 /* This array parallels modify_mem_list, but is kept canonicalized. */
414 static rtx * canon_modify_mem_list;
416 /* Bitmap indexed by block numbers to record which blocks contain
417 function calls. */
418 static bitmap blocks_with_calls;
420 /* Various variables for statistics gathering. */
422 /* Memory used in a pass.
423 This isn't intended to be absolutely precise. Its intent is only
424 to keep an eye on memory usage. */
425 static int bytes_used;
427 /* GCSE substitutions made. */
428 static int gcse_subst_count;
429 /* Number of copy instructions created. */
430 static int gcse_create_count;
431 /* Number of local constants propagated. */
432 static int local_const_prop_count;
433 /* Number of local copies propagated. */
434 static int local_copy_prop_count;
435 /* Number of global constants propagated. */
436 static int global_const_prop_count;
437 /* Number of global copies propagated. */
438 static int global_copy_prop_count;
440 /* For available exprs */
441 static sbitmap *ae_kill;
443 static void compute_can_copy (void);
444 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
445 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
446 static void *gcse_alloc (unsigned long);
447 static void alloc_gcse_mem (void);
448 static void free_gcse_mem (void);
449 static void hash_scan_insn (rtx, struct hash_table_d *);
450 static void hash_scan_set (rtx, rtx, struct hash_table_d *);
451 static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
452 static void hash_scan_call (rtx, rtx, struct hash_table_d *);
453 static int want_to_gcse_p (rtx);
454 static bool gcse_constant_p (const_rtx);
455 static int oprs_unchanged_p (const_rtx, const_rtx, int);
456 static int oprs_anticipatable_p (const_rtx, const_rtx);
457 static int oprs_available_p (const_rtx, const_rtx);
458 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
459 struct hash_table_d *);
460 static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
461 static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
462 static unsigned int hash_set (int, int);
463 static int expr_equiv_p (const_rtx, const_rtx);
464 static void record_last_reg_set_info (rtx, int);
465 static void record_last_mem_set_info (rtx);
466 static void record_last_set_info (rtx, const_rtx, void *);
467 static void compute_hash_table (struct hash_table_d *);
468 static void alloc_hash_table (struct hash_table_d *, int);
469 static void free_hash_table (struct hash_table_d *);
470 static void compute_hash_table_work (struct hash_table_d *);
471 static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
472 static struct expr *lookup_set (unsigned int, struct hash_table_d *);
473 static struct expr *next_set (unsigned int, struct expr *);
474 static void reset_opr_set_tables (void);
475 static int oprs_not_set_p (const_rtx, const_rtx);
476 static void mark_call (rtx);
477 static void mark_set (rtx, rtx);
478 static void mark_clobber (rtx, rtx);
479 static void mark_oprs_set (rtx);
480 static void alloc_cprop_mem (int, int);
481 static void free_cprop_mem (void);
482 static void compute_transp (const_rtx, int, sbitmap *, int);
483 static void compute_transpout (void);
484 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
485 struct hash_table_d *);
486 static void compute_cprop_data (void);
487 static void find_used_regs (rtx *, void *);
488 static int try_replace_reg (rtx, rtx, rtx);
489 static struct expr *find_avail_set (int, rtx);
490 static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
491 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
492 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
493 static void canon_list_insert (rtx, const_rtx, void *);
494 static int cprop_insn (rtx);
495 static void find_implicit_sets (void);
496 static int one_cprop_pass (void);
497 static bool constprop_register (rtx, rtx, rtx);
498 static struct expr *find_bypass_set (int, int);
499 static bool reg_killed_on_edge (const_rtx, const_edge);
500 static int bypass_block (basic_block, rtx, rtx);
501 static int bypass_conditional_jumps (void);
502 static void alloc_pre_mem (int, int);
503 static void free_pre_mem (void);
504 static void compute_pre_data (void);
505 static int pre_expr_reaches_here_p (basic_block, struct expr *,
506 basic_block);
507 static void insert_insn_end_basic_block (struct expr *, basic_block, int);
508 static void pre_insert_copy_insn (struct expr *, rtx);
509 static void pre_insert_copies (void);
510 static int pre_delete (void);
511 static int pre_gcse (void);
512 static int one_pre_gcse_pass (void);
513 static void add_label_notes (rtx, rtx);
514 static void alloc_code_hoist_mem (int, int);
515 static void free_code_hoist_mem (void);
516 static void compute_code_hoist_vbeinout (void);
517 static void compute_code_hoist_data (void);
518 static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
519 static int hoist_code (void);
520 static int one_code_hoisting_pass (void);
521 static rtx process_insert_insn (struct expr *);
522 static int pre_edge_insert (struct edge_list *, struct expr **);
523 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
524 basic_block, char *);
525 static struct ls_expr * ldst_entry (rtx);
526 static void free_ldst_entry (struct ls_expr *);
527 static void free_ldst_mems (void);
528 static void print_ldst_list (FILE *);
529 static struct ls_expr * find_rtx_in_ldst (rtx);
530 static inline struct ls_expr * first_ls_expr (void);
531 static inline struct ls_expr * next_ls_expr (struct ls_expr *);
532 static int simple_mem (const_rtx);
533 static void invalidate_any_buried_refs (rtx);
534 static void compute_ld_motion_mems (void);
535 static void trim_ld_motion_mems (void);
536 static void update_ld_motion_stores (struct expr *);
537 static void free_insn_expr_list_list (rtx *);
538 static void clear_modify_mem_tables (void);
539 static void free_modify_mem_tables (void);
540 static rtx gcse_emit_move_after (rtx, rtx, rtx);
541 static void local_cprop_find_used_regs (rtx *, void *);
542 static bool do_local_cprop (rtx, rtx);
543 static int local_cprop_pass (void);
544 static bool is_too_expensive (const char *);
546 #define GNEW(T) ((T *) gmalloc (sizeof (T)))
547 #define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))
549 #define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
550 #define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))
552 #define GNEWVAR(T, S) ((T *) gmalloc ((S)))
553 #define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))
555 #define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
556 #define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
558 /* Misc. utilities. */
560 /* Nonzero for each mode that supports (set (reg) (reg)).
561 This is trivially true for integer and floating point values.
562 It may or may not be true for condition codes. */
563 static char can_copy[(int) NUM_MACHINE_MODES];
565 /* Compute which modes support reg/reg copy operations. */
567 static void
568 compute_can_copy (void)
570 int i;
571 #ifndef AVOID_CCMODE_COPIES
572 rtx reg, insn;
573 #endif
574 memset (can_copy, 0, NUM_MACHINE_MODES);
576 start_sequence ();
577 for (i = 0; i < NUM_MACHINE_MODES; i++)
578 if (GET_MODE_CLASS (i) == MODE_CC)
580 #ifdef AVOID_CCMODE_COPIES
581 can_copy[i] = 0;
582 #else
583 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
584 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
585 if (recog (PATTERN (insn), insn, NULL) >= 0)
586 can_copy[i] = 1;
587 #endif
589 else
590 can_copy[i] = 1;
592 end_sequence ();
595 /* Returns whether the mode supports reg/reg copy operations. */
597 bool
598 can_copy_p (enum machine_mode mode)
600 static bool can_copy_init_p = false;
602 if (! can_copy_init_p)
604 compute_can_copy ();
605 can_copy_init_p = true;
608 return can_copy[mode] != 0;
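/* Typical use, as a sketch: callers ask can_copy_p (GET_MODE (dest))
   before deciding to create a reg-reg copy of a value; hash_scan_set
   below does exactly this when filtering GCSE candidates.  */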
612 /* Cover function to xmalloc to record bytes allocated. */
614 static void *
615 gmalloc (size_t size)
617 bytes_used += size;
618 return xmalloc (size);
621 /* Cover function to xcalloc to record bytes allocated. */
623 static void *
624 gcalloc (size_t nelem, size_t elsize)
626 bytes_used += nelem * elsize;
627 return xcalloc (nelem, elsize);
630 /* Cover function to obstack_alloc. */
632 static void *
633 gcse_alloc (unsigned long size)
635 bytes_used += size;
636 return obstack_alloc (&gcse_obstack, size);
639 /* Allocate memory for the reg/memory set tracking tables.
640 This is called at the start of each pass. */
642 static void
643 alloc_gcse_mem (void)
645 /* Allocate vars to track sets of regs. */
646 reg_set_bitmap = BITMAP_ALLOC (NULL);
648 /* Allocate array to keep a list of insns which modify memory in each
649 basic block. */
650 modify_mem_list = GCNEWVEC (rtx, last_basic_block);
651 canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
652 modify_mem_list_set = BITMAP_ALLOC (NULL);
653 blocks_with_calls = BITMAP_ALLOC (NULL);
656 /* Free memory allocated by alloc_gcse_mem. */
658 static void
659 free_gcse_mem (void)
661 free_modify_mem_tables ();
662 BITMAP_FREE (modify_mem_list_set);
663 BITMAP_FREE (blocks_with_calls);
666 /* Compute the local properties of each recorded expression.
668 Local properties are those that are defined by the block, irrespective of
669 other blocks.
671 An expression is transparent in a block if its operands are not modified
672 in the block.
674 An expression is computed (locally available) in a block if it is computed
675 at least once and the expression would contain the same value if the
676 computation was moved to the end of the block.
678 An expression is locally anticipatable in a block if it is computed at
679 least once and the expression would contain the same value if the
680 computation was moved to the beginning of the block.
682 We call this routine for cprop, pre and code hoisting. They all compute
683 basically the same information and thus can easily share this code.
685 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
686 properties. If NULL, then it is not necessary to compute or record that
687 particular property.
689 TABLE controls which hash table to look at. If it is the set hash table,
690 TRANSP is additionally computed as ~TRANSP, since this is really cprop's
691 ABSALTERED. */
693 static void
694 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
695 struct hash_table_d *table)
697 unsigned int i;
699 /* Initialize any bitmaps that were passed in. */
700 if (transp)
702 if (table->set_p)
703 sbitmap_vector_zero (transp, last_basic_block);
704 else
705 sbitmap_vector_ones (transp, last_basic_block);
708 if (comp)
709 sbitmap_vector_zero (comp, last_basic_block);
710 if (antloc)
711 sbitmap_vector_zero (antloc, last_basic_block);
713 for (i = 0; i < table->size; i++)
715 struct expr *expr;
717 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
719 int indx = expr->bitmap_index;
720 struct occr *occr;
722 /* The expression is transparent in this block if it is not killed.
723 We start by assuming all are transparent [none are killed], and
724 then reset the bits for those that are. */
725 if (transp)
726 compute_transp (expr->expr, indx, transp, table->set_p);
728 /* The occurrences recorded in antic_occr are exactly those that
729 we want to set to nonzero in ANTLOC. */
730 if (antloc)
731 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
733 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
735 /* While we're scanning the table, this is a good place to
736 initialize this. */
737 occr->deleted_p = 0;
740 /* The occurrences recorded in avail_occr are exactly those that
741 we want to set to nonzero in COMP. */
742 if (comp)
743 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
745 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
747 /* While we're scanning the table, this is a good place to
748 initialize this. */
749 occr->copied_p = 0;
752 /* While we're scanning the table, this is a good place to
753 initialize this. */
754 expr->reaching_reg = 0;
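/* A worked example of the three local properties (hypothetical block),
   for the expression r1 + r2 with bitmap index I:

       insn 1:  r3 = r1 + r2     <- antic_occr: sets bit I of ANTLOC[bb]
       insn 2:  r1 = ...         <- kill: compute_transp clears bit I
                                    of TRANSP[bb]
       insn 3:  r4 = r1 + r2     <- avail_occr: sets bit I of COMP[bb]

   Without insn 2 the block would remain transparent for the expression,
   with ANTLOC and COMP still set from the recorded occurrences.  */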
759 /* Hash table support. */
761 struct reg_avail_info
763 basic_block last_bb;
764 int first_set;
765 int last_set;
768 static struct reg_avail_info *reg_avail_info;
769 static basic_block current_bb;
772 /* See whether X, the source of a set, is something we want to consider for
773 GCSE. */
775 static int
776 want_to_gcse_p (rtx x)
778 #ifdef STACK_REGS
779 /* On register stack architectures, don't GCSE constants from the
780 constant pool, as the benefits are often swamped by the overhead
781 of shuffling the register stack between basic blocks. */
782 if (IS_STACK_MODE (GET_MODE (x)))
783 x = avoid_constant_pool_reference (x);
784 #endif
786 switch (GET_CODE (x))
788 case REG:
789 case SUBREG:
790 case CONST_INT:
791 case CONST_DOUBLE:
792 case CONST_FIXED:
793 case CONST_VECTOR:
794 case CALL:
795 return 0;
797 default:
798 return can_assign_to_reg_without_clobbers_p (x);
802 /* Used internally by can_assign_to_reg_without_clobbers_p. */
804 static GTY(()) rtx test_insn;
806 /* Return true if we can assign X to a pseudo register such that the
807 resulting insn does not result in clobbering a hard register as a
808 side-effect.
810 Additionally, if the target requires it, check that the resulting insn
811 can be copied. If it cannot, this means that X is special and probably
812 has hidden side-effects we don't want to mess with.
814 This function is typically used by code motion passes, to verify
815 that it is safe to insert an insn without worrying about clobbering
816 possibly live hard regs. */
818 bool
819 can_assign_to_reg_without_clobbers_p (rtx x)
821 int num_clobbers = 0;
822 int icode;
824 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
825 if (general_operand (x, GET_MODE (x)))
826 return true;
827 else if (GET_MODE (x) == VOIDmode)
828 return false;
830 /* Otherwise, check if we can make a valid insn from it. First initialize
831 our test insn if we haven't already. */
832 if (test_insn == 0)
834 test_insn
835 = make_insn_raw (gen_rtx_SET (VOIDmode,
836 gen_rtx_REG (word_mode,
837 FIRST_PSEUDO_REGISTER * 2),
838 const0_rtx));
839 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
842 /* Now make an insn like the one we would make when GCSE'ing and see if
843 valid. */
844 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
845 SET_SRC (PATTERN (test_insn)) = x;
847 icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
848 if (icode < 0)
849 return false;
851 if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
852 return false;
854 if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
855 return false;
857 return true;
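/* A hedged usage sketch from a code motion pass: before copying the
   source of a candidate SET into a fresh pseudo and moving it, check

       if (can_assign_to_reg_without_clobbers_p (SET_SRC (pat)))
         ...

   which is what want_to_gcse_p above relies on in its default case.  */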
860 /* Return nonzero if the operands of expression X are unchanged from the
861 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
862 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
864 static int
865 oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
867 int i, j;
868 enum rtx_code code;
869 const char *fmt;
871 if (x == 0)
872 return 1;
874 code = GET_CODE (x);
875 switch (code)
877 case REG:
879 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
881 if (info->last_bb != current_bb)
882 return 1;
883 if (avail_p)
884 return info->last_set < DF_INSN_LUID (insn);
885 else
886 return info->first_set >= DF_INSN_LUID (insn);
889 case MEM:
890 if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
891 x, avail_p))
892 return 0;
893 else
894 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
896 case PRE_DEC:
897 case PRE_INC:
898 case POST_DEC:
899 case POST_INC:
900 case PRE_MODIFY:
901 case POST_MODIFY:
902 return 0;
904 case PC:
905 case CC0: /*FIXME*/
906 case CONST:
907 case CONST_INT:
908 case CONST_DOUBLE:
909 case CONST_FIXED:
910 case CONST_VECTOR:
911 case SYMBOL_REF:
912 case LABEL_REF:
913 case ADDR_VEC:
914 case ADDR_DIFF_VEC:
915 return 1;
917 default:
918 break;
921 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
923 if (fmt[i] == 'e')
925 /* If we are about to do the last recursive call needed at this
926 level, change it into iteration. This function is called enough
927 to be worth it. */
928 if (i == 0)
929 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
931 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
932 return 0;
934 else if (fmt[i] == 'E')
935 for (j = 0; j < XVECLEN (x, i); j++)
936 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
937 return 0;
940 return 1;
943 /* Used for communication between mems_conflict_for_gcse_p and
944 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
945 conflict between two memory references. */
946 static int gcse_mems_conflict_p;
948 /* Used for communication between mems_conflict_for_gcse_p and
949 load_killed_in_block_p. A memory reference for a load instruction,
950 mems_conflict_for_gcse_p will see if a memory store conflicts with
951 this memory load. */
952 static const_rtx gcse_mem_operand;
954 /* DEST is the output of an instruction. If it is a memory reference, and
955 possibly conflicts with the load found in gcse_mem_operand, then set
956 gcse_mems_conflict_p to a nonzero value. */
958 static void
959 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
960 void *data ATTRIBUTE_UNUSED)
962 while (GET_CODE (dest) == SUBREG
963 || GET_CODE (dest) == ZERO_EXTRACT
964 || GET_CODE (dest) == STRICT_LOW_PART)
965 dest = XEXP (dest, 0);
967 /* If DEST is not a MEM, then it will not conflict with the load. Note
968 that function calls are assumed to clobber memory, but are handled
969 elsewhere. */
970 if (! MEM_P (dest))
971 return;
973 /* If we are setting a MEM in our list of specially recognized MEMs,
974 don't mark as killed this time. */
976 if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
978 if (!find_rtx_in_ldst (dest))
979 gcse_mems_conflict_p = 1;
980 return;
983 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
984 rtx_addr_varies_p))
985 gcse_mems_conflict_p = 1;
988 /* Return nonzero if the expression in X (a memory reference) is killed
989 in block BB before or after the insn with the LUID in UID_LIMIT.
990 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
991 before UID_LIMIT.
993 To check the entire block, set UID_LIMIT to max_uid + 1 and
994 AVAIL_P to 0. */
996 static int
997 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x, int avail_p)
999 rtx list_entry = modify_mem_list[bb->index];
1001 /* If this is a readonly MEM, then we aren't going to be changing it. */
1002 if (MEM_READONLY_P (x))
1003 return 0;
1005 while (list_entry)
1007 rtx setter;
1008 /* Ignore entries in the list that do not apply. */
1009 if ((avail_p
1010 && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
1011 || (! avail_p
1012 && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
1014 list_entry = XEXP (list_entry, 1);
1015 continue;
1018 setter = XEXP (list_entry, 0);
1020 /* If SETTER is a call everything is clobbered. Note that calls
1021 to pure functions are never put on the list, so we need not
1022 worry about them. */
1023 if (CALL_P (setter))
1024 return 1;
1026 /* SETTER must be an INSN of some kind that sets memory. Call
1027 note_stores to examine each hunk of memory that is modified.
1029 The note_stores interface is pretty limited, so we have to
1030 communicate via global variables. Yuk. */
1031 gcse_mem_operand = x;
1032 gcse_mems_conflict_p = 0;
1033 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1034 if (gcse_mems_conflict_p)
1035 return 1;
1036 list_entry = XEXP (list_entry, 1);
1038 return 0;
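/* Usage sketch, following the comment above: to ask whether X is killed
   anywhere in BB, pass a LUID past every insn and AVAIL_P == 0:

       if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
         ...X cannot be treated as transparent in BB...  */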
1041 /* Return nonzero if the operands of expression X are unchanged from
1042 the start of INSN's basic block up to but not including INSN. */
1044 static int
1045 oprs_anticipatable_p (const_rtx x, const_rtx insn)
1047 return oprs_unchanged_p (x, insn, 0);
1050 /* Return nonzero if the operands of expression X are unchanged from
1051 INSN to the end of INSN's basic block. */
1053 static int
1054 oprs_available_p (const_rtx x, const_rtx insn)
1056 return oprs_unchanged_p (x, insn, 1);
1059 /* Hash expression X.
1061 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1062 indicating if a volatile operand is found or if the expression contains
1063 something we don't want to insert in the table. HASH_TABLE_SIZE is
1064 the current size of the hash table to be probed. */
1066 static unsigned int
1067 hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
1068 int hash_table_size)
1070 unsigned int hash;
1072 *do_not_record_p = 0;
1074 hash = hash_rtx (x, mode, do_not_record_p,
1075 NULL, /*have_reg_qty=*/false);
1076 return hash % hash_table_size;
1079 /* Hash a set of register REGNO.
1081 Sets are hashed on the register that is set. This simplifies the PRE copy
1082 propagation code.
1084 ??? May need to make things more elaborate. Later, as necessary. */
1086 static unsigned int
1087 hash_set (int regno, int hash_table_size)
1089 unsigned int hash;
1091 hash = regno;
1092 return hash % hash_table_size;
1095 /* Return nonzero if exp1 is equivalent to exp2. */
1097 static int
1098 expr_equiv_p (const_rtx x, const_rtx y)
1100 return exp_equiv_p (x, y, 0, true);
1103 /* Insert expression X in INSN in the hash TABLE.
1104 If it is already present, record it as the last occurrence in INSN's
1105 basic block.
1107 MODE is the mode of the value X is being stored into.
1108 It is only used if X is a CONST_INT.
1110 ANTIC_P is nonzero if X is an anticipatable expression.
1111 AVAIL_P is nonzero if X is an available expression. */
1113 static void
1114 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1115 int avail_p, struct hash_table_d *table)
1117 int found, do_not_record_p;
1118 unsigned int hash;
1119 struct expr *cur_expr, *last_expr = NULL;
1120 struct occr *antic_occr, *avail_occr;
1122 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1124 /* Do not insert expression in table if it contains volatile operands,
1125 or if hash_expr determines the expression is something we don't want
1126 to or can't handle. */
1127 if (do_not_record_p)
1128 return;
1130 cur_expr = table->table[hash];
1131 found = 0;
1133 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1135 /* If the expression isn't found, save a pointer to the end of
1136 the list. */
1137 last_expr = cur_expr;
1138 cur_expr = cur_expr->next_same_hash;
1141 if (! found)
1143 cur_expr = GOBNEW (struct expr);
1144 bytes_used += sizeof (struct expr);
1145 if (table->table[hash] == NULL)
1146 /* This is the first pattern that hashed to this index. */
1147 table->table[hash] = cur_expr;
1148 else
1149 /* Add EXPR to end of this hash chain. */
1150 last_expr->next_same_hash = cur_expr;
1152 /* Set the fields of the expr element. */
1153 cur_expr->expr = x;
1154 cur_expr->bitmap_index = table->n_elems++;
1155 cur_expr->next_same_hash = NULL;
1156 cur_expr->antic_occr = NULL;
1157 cur_expr->avail_occr = NULL;
1160 /* Now record the occurrence(s). */
1161 if (antic_p)
1163 antic_occr = cur_expr->antic_occr;
1165 if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1166 antic_occr = NULL;
1168 if (antic_occr)
1169 /* Found another instance of the expression in the same basic block.
1170 Prefer the currently recorded one. We want the first one in the
1171 block and the block is scanned from start to end. */
1172 ; /* nothing to do */
1173 else
1175 /* First occurrence of this expression in this basic block. */
1176 antic_occr = GOBNEW (struct occr);
1177 bytes_used += sizeof (struct occr);
1178 antic_occr->insn = insn;
1179 antic_occr->next = cur_expr->antic_occr;
1180 antic_occr->deleted_p = 0;
1181 cur_expr->antic_occr = antic_occr;
1185 if (avail_p)
1187 avail_occr = cur_expr->avail_occr;
1189 if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
1191 /* Found another instance of the expression in the same basic block.
1192 Prefer this occurrence to the currently recorded one. We want
1193 the last one in the block and the block is scanned from start
1194 to end. */
1195 avail_occr->insn = insn;
1197 else
1199 /* First occurrence of this expression in this basic block. */
1200 avail_occr = GOBNEW (struct occr);
1201 bytes_used += sizeof (struct occr);
1202 avail_occr->insn = insn;
1203 avail_occr->next = cur_expr->avail_occr;
1204 avail_occr->deleted_p = 0;
1205 cur_expr->avail_occr = avail_occr;
1210 /* Insert pattern X in INSN in the hash table.
1211 X is a SET of a reg to either another reg or a constant.
1212 If it is already present, record it as the last occurrence in INSN's
1213 basic block. */
1215 static void
1216 insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
1218 int found;
1219 unsigned int hash;
1220 struct expr *cur_expr, *last_expr = NULL;
1221 struct occr *cur_occr;
1223 gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));
1225 hash = hash_set (REGNO (SET_DEST (x)), table->size);
1227 cur_expr = table->table[hash];
1228 found = 0;
1230 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1232 /* If the expression isn't found, save a pointer to the end of
1233 the list. */
1234 last_expr = cur_expr;
1235 cur_expr = cur_expr->next_same_hash;
1238 if (! found)
1240 cur_expr = GOBNEW (struct expr);
1241 bytes_used += sizeof (struct expr);
1242 if (table->table[hash] == NULL)
1243 /* This is the first pattern that hashed to this index. */
1244 table->table[hash] = cur_expr;
1245 else
1246 /* Add EXPR to end of this hash chain. */
1247 last_expr->next_same_hash = cur_expr;
1249 /* Set the fields of the expr element.
1250 We must copy X because it can be modified when copy propagation is
1251 performed on its operands. */
1252 cur_expr->expr = copy_rtx (x);
1253 cur_expr->bitmap_index = table->n_elems++;
1254 cur_expr->next_same_hash = NULL;
1255 cur_expr->antic_occr = NULL;
1256 cur_expr->avail_occr = NULL;
1259 /* Now record the occurrence. */
1260 cur_occr = cur_expr->avail_occr;
1262 if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
1264 /* Found another instance of the expression in the same basic block.
1265 Prefer this occurrence to the currently recorded one. We want
1266 the last one in the block and the block is scanned from start
1267 to end. */
1268 cur_occr->insn = insn;
1270 else
1272 /* First occurrence of this expression in this basic block. */
1273 cur_occr = GOBNEW (struct occr);
1274 bytes_used += sizeof (struct occr);
1275 cur_occr->insn = insn;
1276 cur_occr->next = cur_expr->avail_occr;
1277 cur_occr->deleted_p = 0;
1278 cur_expr->avail_occr = cur_occr;
1282 /* Determine whether the rtx X should be treated as a constant for
1283 the purposes of GCSE's constant propagation. */
1285 static bool
1286 gcse_constant_p (const_rtx x)
1288 /* Consider a COMPARE of two integer constants to be constant. */
1289 if (GET_CODE (x) == COMPARE
1290 && CONST_INT_P (XEXP (x, 0))
1291 && CONST_INT_P (XEXP (x, 1)))
1292 return true;
1294 /* Consider a COMPARE of two identical registers a constant
1295 if they are not floating point registers. */
1296 if (GET_CODE(x) == COMPARE
1297 && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
1298 && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
1299 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
1300 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
1301 return true;
1303 /* Since X might be inserted more than once we have to take care that it
1304 is sharable. */
1305 return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
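/* Hedged examples of the tests above: (compare (const_int 1)
   (const_int 2)) is constant by the first test; (compare (reg 70)
   (reg 70)) in an integer mode is constant by the second; a CONST
   wrapping an unshared expression fails the final shared_const_p
   check and is rejected.  */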
1308 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
1309 expression one). */
1311 static void
1312 hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
1314 rtx src = SET_SRC (pat);
1315 rtx dest = SET_DEST (pat);
1316 rtx note;
1318 if (GET_CODE (src) == CALL)
1319 hash_scan_call (src, insn, table);
1321 else if (REG_P (dest))
1323 unsigned int regno = REGNO (dest);
1324 rtx tmp;
1326 /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1328 This allows us to do a single GCSE pass and still eliminate
1329 redundant constants, addresses or other expressions that are
1330 constructed with multiple instructions.
1332 However, keep the original SRC if INSN is a simple reg-reg move. In
1333 this case, there will almost always be a REG_EQUAL note on the
1334 insn that sets SRC. By recording the REG_EQUAL value here as SRC
1335 for INSN, we miss copy propagation opportunities and we perform the
1336 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1337 do more than one PRE GCSE pass.
1339 Note that this does not impede profitable constant propagations. We
1340 "look through" reg-reg sets in lookup_avail_set. */
1341 note = find_reg_equal_equiv_note (insn);
1342 if (note != 0
1343 && REG_NOTE_KIND (note) == REG_EQUAL
1344 && !REG_P (src)
1345 && (table->set_p
1346 ? gcse_constant_p (XEXP (note, 0))
1347 : want_to_gcse_p (XEXP (note, 0))))
1348 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
1350 /* Only record sets of pseudo-regs in the hash table. */
1351 if (! table->set_p
1352 && regno >= FIRST_PSEUDO_REGISTER
1353 /* Don't GCSE something if we can't do a reg/reg copy. */
1354 && can_copy_p (GET_MODE (dest))
1355 /* GCSE commonly inserts instruction after the insn. We can't
1356 do that easily for EH_REGION notes so disable GCSE on these
1357 for now. */
1358 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1359 /* Is SET_SRC something we want to gcse? */
1360 && want_to_gcse_p (src)
1361 /* Don't CSE a nop. */
1362 && ! set_noop_p (pat)
1363 /* Don't GCSE if it has attached REG_EQUIV note.
1364 At this point only function parameters should have
1365 REG_EQUIV notes and if the argument slot is used somewhere
1366 explicitly, it means the address of the parameter has been taken,
1367 so we should not extend the lifetime of the pseudo. */
1368 && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1370 /* An expression is not anticipatable if its operands are
1371 modified before this insn or if this is not the only SET in
1372 this insn. The latter condition does not have to mean that
1373 SRC itself is not anticipatable, but we just will not be
1374 able to handle code motion of insns with multiple sets. */
1375 int antic_p = oprs_anticipatable_p (src, insn)
1376 && !multiple_sets (insn);
1377 /* An expression is not available if its operands are
1378 subsequently modified, including this insn. It's also not
1379 available if this is a branch, because we can't insert
1380 a set after the branch. */
1381 int avail_p = (oprs_available_p (src, insn)
1382 && ! JUMP_P (insn));
1384 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
1387 /* Record sets for constant/copy propagation. */
1388 else if (table->set_p
1389 && regno >= FIRST_PSEUDO_REGISTER
1390 && ((REG_P (src)
1391 && REGNO (src) >= FIRST_PSEUDO_REGISTER
1392 && can_copy_p (GET_MODE (dest))
1393 && REGNO (src) != regno)
1394 || gcse_constant_p (src))
1395 /* A copy is not available if its src or dest is subsequently
1396 modified. Here we want to search from INSN+1 on, but
1397 oprs_available_p searches from INSN on. */
1398 && (insn == BB_END (BLOCK_FOR_INSN (insn))
1399 || (tmp = next_nonnote_insn (insn)) == NULL_RTX
1400 || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
1401 || oprs_available_p (pat, tmp)))
1402 insert_set_in_table (pat, insn, table);
1404 /* In case of a store we want to consider the memory value as available in
1405 the REG stored in that memory. This makes it possible to remove loads
1406 that are redundant due to stores to the same location. */
1407 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1409 unsigned int regno = REGNO (src);
1411 /* Do not do this for constant/copy propagation. */
1412 if (! table->set_p
1413 /* Only record sets of pseudo-regs in the hash table. */
1414 && regno >= FIRST_PSEUDO_REGISTER
1415 /* Don't GCSE something if we can't do a reg/reg copy. */
1416 && can_copy_p (GET_MODE (src))
1417 /* GCSE commonly inserts instruction after the insn. We can't
1418 do that easily for EH_REGION notes so disable GCSE on these
1419 for now. */
1420 && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1421 /* Is SET_DEST something we want to gcse? */
1422 && want_to_gcse_p (dest)
1423 /* Don't CSE a nop. */
1424 && ! set_noop_p (pat)
1425 /* Don't GCSE if it has attached REG_EQUIV note.
1426 At this point only function parameters should have
1427 REG_EQUIV notes and if the argument slot is used somewhere
1428 explicitly, it means the address of the parameter has been taken,
1429 so we should not extend the lifetime of the pseudo. */
1430 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1431 || ! MEM_P (XEXP (note, 0))))
1433 /* Stores are never anticipatable. */
1434 int antic_p = 0;
1435 /* An expression is not available if its operands are
1436 subsequently modified, including this insn. It's also not
1437 available if this is a branch, because we can't insert
1438 a set after the branch. */
1439 int avail_p = oprs_available_p (dest, insn)
1440 && ! JUMP_P (insn);
1442 /* Record the memory expression (DEST) in the hash table. */
1443 insert_expr_in_table (dest, GET_MODE (dest), insn,
1444 antic_p, avail_p, table);
1449 static void
1450 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1451 struct hash_table_d *table ATTRIBUTE_UNUSED)
1453 /* Currently nothing to do. */
1456 static void
1457 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1458 struct hash_table_d *table ATTRIBUTE_UNUSED)
1460 /* Currently nothing to do. */
1463 /* Process INSN and add hash table entries as appropriate.
1465 Only available expressions that set a single pseudo-reg are recorded.
1467 Single sets in a PARALLEL could be handled, but it's an extra complication
1468 that isn't dealt with right now. The trick is handling the CLOBBERs that
1469 are also in the PARALLEL. Later.
1471 TABLE determines whether this is for the assignment hash table
1472 or for the expression hash table. */
1474 static void
1475 hash_scan_insn (rtx insn, struct hash_table_d *table)
1477 rtx pat = PATTERN (insn);
1478 int i;
1480 /* Pick out the sets of INSN and for other forms of instructions record
1481 what's been modified. */
1483 if (GET_CODE (pat) == SET)
1484 hash_scan_set (pat, insn, table);
1485 else if (GET_CODE (pat) == PARALLEL)
1486 for (i = 0; i < XVECLEN (pat, 0); i++)
1488 rtx x = XVECEXP (pat, 0, i);
1490 if (GET_CODE (x) == SET)
1491 hash_scan_set (x, insn, table);
1492 else if (GET_CODE (x) == CLOBBER)
1493 hash_scan_clobber (x, insn, table);
1494 else if (GET_CODE (x) == CALL)
1495 hash_scan_call (x, insn, table);
1498 else if (GET_CODE (pat) == CLOBBER)
1499 hash_scan_clobber (pat, insn, table);
1500 else if (GET_CODE (pat) == CALL)
1501 hash_scan_call (pat, insn, table);
1504 static void
1505 dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
1507 int i;
1508 /* Flattened out table, so it's printed in proper order. */
1509 struct expr **flat_table;
1510 unsigned int *hash_val;
1511 struct expr *expr;
1513 flat_table = XCNEWVEC (struct expr *, table->n_elems);
1514 hash_val = XNEWVEC (unsigned int, table->n_elems);
1516 for (i = 0; i < (int) table->size; i++)
1517 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1519 flat_table[expr->bitmap_index] = expr;
1520 hash_val[expr->bitmap_index] = i;
1523 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1524 name, table->size, table->n_elems);
1526 for (i = 0; i < (int) table->n_elems; i++)
1527 if (flat_table[i] != 0)
1529 expr = flat_table[i];
1530 fprintf (file, "Index %d (hash value %d)\n ",
1531 expr->bitmap_index, hash_val[i]);
1532 print_rtl (file, expr->expr);
1533 fprintf (file, "\n");
1536 fprintf (file, "\n");
1538 free (flat_table);
1539 free (hash_val);
1542 /* Record register first/last/block set information for REGNO in INSN.
1544 first_set records the first place in the block where the register
1545 is set and is used to compute "anticipatability".
1547 last_set records the last place in the block where the register
1548 is set and is used to compute "availability".
1550 last_bb records the block for which first_set and last_set are
1551 valid, as a quick test to invalidate them. */
1553 static void
1554 record_last_reg_set_info (rtx insn, int regno)
1556 struct reg_avail_info *info = &reg_avail_info[regno];
1557 int luid = DF_INSN_LUID (insn);
1559 info->last_set = luid;
1560 if (info->last_bb != current_bb)
1562 info->last_bb = current_bb;
1563 info->first_set = luid;
1568 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1569 Note we store a pair of elements in the list, so they have to be
1570 taken off pairwise. */
1572 static void
1573 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
1574 void * v_insn)
1576 rtx dest_addr, insn;
1577 int bb;
1579 while (GET_CODE (dest) == SUBREG
1580 || GET_CODE (dest) == ZERO_EXTRACT
1581 || GET_CODE (dest) == STRICT_LOW_PART)
1582 dest = XEXP (dest, 0);
1584 /* If DEST is not a MEM, then it will not conflict with a load. Note
1585 that function calls are assumed to clobber memory, but are handled
1586 elsewhere. */
1588 if (! MEM_P (dest))
1589 return;
1591 dest_addr = get_addr (XEXP (dest, 0));
1592 dest_addr = canon_rtx (dest_addr);
1593 insn = (rtx) v_insn;
1594 bb = BLOCK_NUM (insn);
1596 canon_modify_mem_list[bb] =
1597 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
1598 canon_modify_mem_list[bb] =
1599 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
1602 /* Record memory modification information for INSN. We do not actually care
1603 about the memory location(s) that are set, or even how they are set (consider
1604 a CALL_INSN). We merely need to record which insns modify memory. */
1606 static void
1607 record_last_mem_set_info (rtx insn)
1609 int bb = BLOCK_NUM (insn);
1611 /* load_killed_in_block_p will handle the case of calls clobbering
1612 everything. */
1613 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
1614 bitmap_set_bit (modify_mem_list_set, bb);
1616 if (CALL_P (insn))
1618 /* Note that traversals of this loop (other than for free-ing)
1619 will break after encountering a CALL_INSN. So, there's no
1620 need to insert a pair of items, as canon_list_insert does. */
1621 canon_modify_mem_list[bb] =
1622 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
1623 bitmap_set_bit (blocks_with_calls, bb);
1625 else
1626 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
1629 /* Called from compute_hash_table via note_stores to handle one
1630 SET or CLOBBER in an insn. DATA is really the instruction in which
1631 the SET is taking place. */
1633 static void
1634 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1636 rtx last_set_insn = (rtx) data;
1638 if (GET_CODE (dest) == SUBREG)
1639 dest = SUBREG_REG (dest);
1641 if (REG_P (dest))
1642 record_last_reg_set_info (last_set_insn, REGNO (dest));
1643 else if (MEM_P (dest)
1644 /* Ignore pushes, they clobber nothing. */
1645 && ! push_operand (dest, GET_MODE (dest)))
1646 record_last_mem_set_info (last_set_insn);
1649 /* Top level function to create an expression or assignment hash table.
1651 Expression entries are placed in the hash table if
1652 - they are of the form (set (pseudo-reg) src),
1653 - src is something we want to perform GCSE on,
1654 - none of the operands are subsequently modified in the block
1656 Assignment entries are placed in the hash table if
1657 - they are of the form (set (pseudo-reg) src),
1658 - src is something we want to perform const/copy propagation on,
1659 - none of the operands or target are subsequently modified in the block
1661 Currently src must be a pseudo-reg or a const_int.
1663 TABLE is the table computed. */
1665 static void
1666 compute_hash_table_work (struct hash_table_d *table)
1668 int i;
1670 /* Re-cache (return to the free list) any INSN_LIST nodes we have allocated. */
1671 clear_modify_mem_tables ();
1672 /* Some working arrays used to track first and last set in each block. */
1673 reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1675 for (i = 0; i < max_reg_num (); ++i)
1676 reg_avail_info[i].last_bb = NULL;
1678 FOR_EACH_BB (current_bb)
1680 rtx insn;
1681 unsigned int regno;
1683 /* First pass over the instructions records information used to
1684 determine when registers and memory are first and last set. */
1685 FOR_BB_INSNS (current_bb, insn)
1687 if (! INSN_P (insn))
1688 continue;
1690 if (CALL_P (insn))
1692 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1693 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1694 record_last_reg_set_info (insn, regno);
1696 mark_call (insn);
1699 note_stores (PATTERN (insn), record_last_set_info, insn);
1702 /* Insert implicit sets in the hash table. */
1703 if (table->set_p
1704 && implicit_sets[current_bb->index] != NULL_RTX)
1705 hash_scan_set (implicit_sets[current_bb->index],
1706 BB_HEAD (current_bb), table);
1708 /* The next pass builds the hash table. */
1709 FOR_BB_INSNS (current_bb, insn)
1710 if (INSN_P (insn))
1711 hash_scan_insn (insn, table);
1714 free (reg_avail_info);
1715 reg_avail_info = NULL;
1718 /* Allocate space for the set/expr hash TABLE.
1719 The current number of insns is used to determine the number of
1720 buckets to use. SET_P determines whether the set or the expression
1721 table will be created. */
1723 static void
1724 alloc_hash_table (struct hash_table_d *table, int set_p)
1726 int n;
1728 n = get_max_insn_count ();
1730 table->size = n / 4;
1731 if (table->size < 11)
1732 table->size = 11;
1734 /* Attempt to maintain efficient use of hash table.
1735 Making it an odd number is simplest for now.
1736 ??? Later take some measurements. */
1737 table->size |= 1;
1738 n = table->size * sizeof (struct expr *);
1739 table->table = GNEWVAR (struct expr *, n);
1740 table->set_p = set_p;
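/* Illustrative sketch, not part of the original file: the sizing rule
   above, restated standalone.  For a function with 1000 insns this
   gives 1000/4 = 250, and OR-ing in the low bit yields 251 buckets;
   very small functions are clamped to the minimum of 11.  */
#if 0
static int
example_hash_table_size (int n_insns)
{
  int size = n_insns / 4;
  if (size < 11)
    size = 11;
  return size | 1;	/* Keep the bucket count odd.  */
}
#endif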
1743 /* Free things allocated by alloc_hash_table. */
1745 static void
1746 free_hash_table (struct hash_table_d *table)
1748 free (table->table);
1751 /* Compute the hash TABLE for doing copy/const propagation or
1752 expression hash table. */
1754 static void
1755 compute_hash_table (struct hash_table_d *table)
1757 /* Initialize count of number of entries in hash table. */
1758 table->n_elems = 0;
1759 memset (table->table, 0, table->size * sizeof (struct expr *));
1761 compute_hash_table_work (table);
1764 /* Expression tracking support. */
1766 /* Lookup REGNO in the set TABLE. The result is a pointer to the
1767 table entry, or NULL if not found. */
1769 static struct expr *
1770 lookup_set (unsigned int regno, struct hash_table_d *table)
1772 unsigned int hash = hash_set (regno, table->size);
1773 struct expr *expr;
1775 expr = table->table[hash];
1777 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
1778 expr = expr->next_same_hash;
1780 return expr;
1783 /* Return the next entry for REGNO in list EXPR. */
1785 static struct expr *
1786 next_set (unsigned int regno, struct expr *expr)
1788 do
1789 expr = expr->next_same_hash;
1790 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
1792 return expr;
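/* Illustrative sketch, not part of the original file: lookup_set and
   next_set combine into a walk over every SET of a given pseudo in the
   hash table.  "process_set" is a hypothetical consumer.  */
#if 0
static void
example_walk_sets (unsigned int regno)
{
  struct expr *set;

  for (set = lookup_set (regno, &set_hash_table);
       set != NULL;
       set = next_set (regno, set))
    process_set (set);	/* Hypothetical.  */
}
#endif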
1795 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
1796 types may be mixed. */
1798 static void
1799 free_insn_expr_list_list (rtx *listp)
1801 rtx list, next;
1803 for (list = *listp; list ; list = next)
1805 next = XEXP (list, 1);
1806 if (GET_CODE (list) == EXPR_LIST)
1807 free_EXPR_LIST_node (list);
1808 else
1809 free_INSN_LIST_node (list);
1812 *listp = NULL;
1815 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1816 static void
1817 clear_modify_mem_tables (void)
1819 unsigned i;
1820 bitmap_iterator bi;
1822 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1824 free_INSN_LIST_list (modify_mem_list + i);
1825 free_insn_expr_list_list (canon_modify_mem_list + i);
1827 bitmap_clear (modify_mem_list_set);
1828 bitmap_clear (blocks_with_calls);
1831 /* Release memory used by modify_mem_list_set. */
1833 static void
1834 free_modify_mem_tables (void)
1836 clear_modify_mem_tables ();
1837 free (modify_mem_list);
1838 free (canon_modify_mem_list);
1839 modify_mem_list = 0;
1840 canon_modify_mem_list = 0;
1843 /* Reset tables used to keep track of what's still available [since the
1844 start of the block]. */
1846 static void
1847 reset_opr_set_tables (void)
1849 /* Maintain a bitmap of which regs have been set since beginning of
1850 the block. */
1851 CLEAR_REG_SET (reg_set_bitmap);
1853 /* Also keep a record of the last instruction to modify memory.
1854 For now this is very trivial; we only record whether any memory
1855 location has been modified. */
1856 clear_modify_mem_tables ();
1859 /* Return nonzero if the operands of X are not set before INSN in
1860 INSN's basic block. */
1862 static int
1863 oprs_not_set_p (const_rtx x, const_rtx insn)
1865 int i, j;
1866 enum rtx_code code;
1867 const char *fmt;
1869 if (x == 0)
1870 return 1;
1872 code = GET_CODE (x);
1873 switch (code)
1875 case PC:
1876 case CC0:
1877 case CONST:
1878 case CONST_INT:
1879 case CONST_DOUBLE:
1880 case CONST_FIXED:
1881 case CONST_VECTOR:
1882 case SYMBOL_REF:
1883 case LABEL_REF:
1884 case ADDR_VEC:
1885 case ADDR_DIFF_VEC:
1886 return 1;
1888 case MEM:
1889 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
1890 DF_INSN_LUID (insn), x, 0))
1891 return 0;
1892 else
1893 return oprs_not_set_p (XEXP (x, 0), insn);
1895 case REG:
1896 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
1898 default:
1899 break;
1902 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1904 if (fmt[i] == 'e')
1906 /* If we are about to do the last recursive call
1907 needed at this level, change it into iteration.
1908 This function is called enough to be worth it. */
1909 if (i == 0)
1910 return oprs_not_set_p (XEXP (x, i), insn);
1912 if (! oprs_not_set_p (XEXP (x, i), insn))
1913 return 0;
1915 else if (fmt[i] == 'E')
1916 for (j = 0; j < XVECLEN (x, i); j++)
1917 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
1918 return 0;
1921 return 1;
1924 /* Mark things set by a CALL. */
1926 static void
1927 mark_call (rtx insn)
1929 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1930 record_last_mem_set_info (insn);
1933 /* Mark things set by a SET. */
1935 static void
1936 mark_set (rtx pat, rtx insn)
1938 rtx dest = SET_DEST (pat);
1940 while (GET_CODE (dest) == SUBREG
1941 || GET_CODE (dest) == ZERO_EXTRACT
1942 || GET_CODE (dest) == STRICT_LOW_PART)
1943 dest = XEXP (dest, 0);
1945 if (REG_P (dest))
1946 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
1947 else if (MEM_P (dest))
1948 record_last_mem_set_info (insn);
1950 if (GET_CODE (SET_SRC (pat)) == CALL)
1951 mark_call (insn);
1954 /* Record things set by a CLOBBER. */
1956 static void
1957 mark_clobber (rtx pat, rtx insn)
1959 rtx clob = XEXP (pat, 0);
1961 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
1962 clob = XEXP (clob, 0);
1964 if (REG_P (clob))
1965 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
1966 else
1967 record_last_mem_set_info (insn);
1970 /* Record things set by INSN.
1971 This data is used by oprs_not_set_p. */
1973 static void
1974 mark_oprs_set (rtx insn)
1976 rtx pat = PATTERN (insn);
1977 int i;
1979 if (GET_CODE (pat) == SET)
1980 mark_set (pat, insn);
1981 else if (GET_CODE (pat) == PARALLEL)
1982 for (i = 0; i < XVECLEN (pat, 0); i++)
1984 rtx x = XVECEXP (pat, 0, i);
1986 if (GET_CODE (x) == SET)
1987 mark_set (x, insn);
1988 else if (GET_CODE (x) == CLOBBER)
1989 mark_clobber (x, insn);
1990 else if (GET_CODE (x) == CALL)
1991 mark_call (insn);
1994 else if (GET_CODE (pat) == CLOBBER)
1995 mark_clobber (pat, insn);
1996 else if (GET_CODE (pat) == CALL)
1997 mark_call (insn);
2001 /* Compute copy/constant propagation working variables. */
2003 /* Local properties of assignments. */
2004 static sbitmap *cprop_pavloc;
2005 static sbitmap *cprop_absaltered;
2007 /* Global properties of assignments (computed from the local properties). */
2008 static sbitmap *cprop_avin;
2009 static sbitmap *cprop_avout;
2011 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2012 basic blocks. N_SETS is the number of sets. */
2014 static void
2015 alloc_cprop_mem (int n_blocks, int n_sets)
2017 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2018 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2020 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2021 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2024 /* Free vars used by copy/const propagation. */
2026 static void
2027 free_cprop_mem (void)
2029 sbitmap_vector_free (cprop_pavloc);
2030 sbitmap_vector_free (cprop_absaltered);
2031 sbitmap_vector_free (cprop_avin);
2032 sbitmap_vector_free (cprop_avout);
2035 /* For each block, compute whether X is transparent. X is either an
2036 expression or an assignment [though we don't care which, for this context
2037 an assignment is treated as an expression]. For each block where an
2038 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2039 bit in BMAP. */
2041 static void
2042 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2044 int i, j;
2045 enum rtx_code code;
2046 const char *fmt;
2048 /* repeat is used to turn tail-recursion into iteration since GCC
2049 can't do it when there's no return value. */
2050 repeat:
2052 if (x == 0)
2053 return;
2055 code = GET_CODE (x);
2056 switch (code)
2058 case REG:
2059 if (set_p)
2061 df_ref def;
2062 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2063 def;
2064 def = DF_REF_NEXT_REG (def))
2065 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2067 else
2069 df_ref def;
2070 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2071 def;
2072 def = DF_REF_NEXT_REG (def))
2073 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2076 return;
2078 case MEM:
2079 if (! MEM_READONLY_P (x))
2081 bitmap_iterator bi;
2082 unsigned bb_index;
2084 /* First handle all the blocks with calls. We don't need to
2085 do any list walking for them. */
2086 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2088 if (set_p)
2089 SET_BIT (bmap[bb_index], indx);
2090 else
2091 RESET_BIT (bmap[bb_index], indx);
2094 /* Now iterate over the blocks which have memory modifications
2095 but which do not have any calls. */
2096 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2097 blocks_with_calls,
2098 0, bb_index, bi)
2100 rtx list_entry = canon_modify_mem_list[bb_index];
2102 while (list_entry)
2104 rtx dest, dest_addr;
2106 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2107 Examine each hunk of memory that is modified. */
2109 dest = XEXP (list_entry, 0);
2110 list_entry = XEXP (list_entry, 1);
2111 dest_addr = XEXP (list_entry, 0);
2113 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2114 x, NULL_RTX, rtx_addr_varies_p))
2116 if (set_p)
2117 SET_BIT (bmap[bb_index], indx);
2118 else
2119 RESET_BIT (bmap[bb_index], indx);
2120 break;
2122 list_entry = XEXP (list_entry, 1);
2127 x = XEXP (x, 0);
2128 goto repeat;
2130 case PC:
2131 case CC0: /*FIXME*/
2132 case CONST:
2133 case CONST_INT:
2134 case CONST_DOUBLE:
2135 case CONST_FIXED:
2136 case CONST_VECTOR:
2137 case SYMBOL_REF:
2138 case LABEL_REF:
2139 case ADDR_VEC:
2140 case ADDR_DIFF_VEC:
2141 return;
2143 default:
2144 break;
2147 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2149 if (fmt[i] == 'e')
2151 /* If we are about to do the last recursive call
2152 needed at this level, change it into iteration.
2153 This function is called enough to be worth it. */
2154 if (i == 0)
2156 x = XEXP (x, i);
2157 goto repeat;
2160 compute_transp (XEXP (x, i), indx, bmap, set_p);
2162 else if (fmt[i] == 'E')
2163 for (j = 0; j < XVECLEN (x, i); j++)
2164 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2168 /* Top level routine to do the dataflow analysis needed by copy/const
2169 propagation. */
2171 static void
2172 compute_cprop_data (void)
2174 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2175 compute_available (cprop_pavloc, cprop_absaltered,
2176 cprop_avout, cprop_avin);
2179 /* Copy/constant propagation. */
2181 /* Maximum number of register uses in an insn that we handle. */
2182 #define MAX_USES 8
2184 /* Table of uses found in an insn.
2185 Allocated statically to avoid alloc/free complexity and overhead. */
2186 static struct reg_use reg_use_table[MAX_USES];
2188 /* Index into `reg_use_table' while building it. */
2189 static int reg_use_count;
2191 /* Set up a list of register numbers used in INSN. The found uses are stored
2192 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2193 and contains the number of uses in the table upon exit.
2195 ??? If a register appears multiple times we will record it multiple times.
2196 This doesn't hurt anything but it will slow things down. */
2198 static void
2199 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2201 int i, j;
2202 enum rtx_code code;
2203 const char *fmt;
2204 rtx x = *xptr;
2206 /* repeat is used to turn tail-recursion into iteration since GCC
2207 can't do it when there's no return value. */
2208 repeat:
2209 if (x == 0)
2210 return;
2212 code = GET_CODE (x);
2213 if (REG_P (x))
2215 if (reg_use_count == MAX_USES)
2216 return;
2218 reg_use_table[reg_use_count].reg_rtx = x;
2219 reg_use_count++;
2222 /* Recursively scan the operands of this expression. */
2224 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2226 if (fmt[i] == 'e')
2228 /* If we are about to do the last recursive call
2229 needed at this level, change it into iteration.
2230 This function is called enough to be worth it. */
2231 if (i == 0)
2233 x = XEXP (x, 0);
2234 goto repeat;
2237 find_used_regs (&XEXP (x, i), data);
2239 else if (fmt[i] == 'E')
2240 for (j = 0; j < XVECLEN (x, i); j++)
2241 find_used_regs (&XVECEXP (x, i, j), data);
2245 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2246 Returns nonzero if successful. */
2248 static int
2249 try_replace_reg (rtx from, rtx to, rtx insn)
2251 rtx note = find_reg_equal_equiv_note (insn);
2252 rtx src = 0;
2253 int success = 0;
2254 rtx set = single_set (insn);
2256 /* Usually we substitute easy stuff, so we won't copy everything.
2257 We do, however, need to take care not to duplicate non-trivial CONST
2258 expressions. */
2259 to = copy_rtx (to);
2261 validate_replace_src_group (from, to, insn);
2262 if (num_changes_pending () && apply_change_group ())
2263 success = 1;
2265 /* Try to simplify SET_SRC if we have substituted a constant. */
2266 if (success && set && CONSTANT_P (to))
2268 src = simplify_rtx (SET_SRC (set));
2270 if (src)
2271 validate_change (insn, &SET_SRC (set), src, 0);
2274 /* If there is already a REG_EQUAL note, update the expression in it
2275 with our replacement. */
2276 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2277 set_unique_reg_note (insn, REG_EQUAL,
2278 simplify_replace_rtx (XEXP (note, 0), from,
2279 copy_rtx (to)));
2280 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2282 /* If above failed and this is a single set, try to simplify the source of
2283 the set given our substitution. We could perhaps try this for multiple
2284 SETs, but it probably won't buy us anything. */
2285 src = simplify_replace_rtx (SET_SRC (set), from, to);
2287 if (!rtx_equal_p (src, SET_SRC (set))
2288 && validate_change (insn, &SET_SRC (set), src, 0))
2289 success = 1;
2291 /* If we've failed to do replacement, have a single SET, don't already
2292 have a note, and have no special SET, add a REG_EQUAL note to not
2293 lose information. */
2294 if (!success && note == 0 && set != 0
2295 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2296 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2297 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2300 /* A REG_EQUAL note may get simplified into a plain register.
2301 We don't allow that; remove such a note. This case ought
2302 not to happen, because the code above ought to have synthesized
2303 a reg-reg move, but be on the safe side. */
2304 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2305 remove_note (insn, note);
2307 return success;
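/* Worked example, not from the original file: if INSN is
     (set (reg 100) (plus (reg 5) (const_int 3)))
   and we propagate (const_int 4) for (reg 5), the substituted source
   (plus (const_int 4) (const_int 3)) is handed to simplify_rtx above,
   which folds it to (const_int 7), leaving
     (set (reg 100) (const_int 7)).  */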
2310 /* Find a SET of register REGNO that is available on entry to INSN's
2311 block. Returns NULL if no such set is found. */
2313 static struct expr *
2314 find_avail_set (int regno, rtx insn)
2316 /* SET1 contains the last set found that can be returned to the caller for
2317 use in a substitution. */
2318 struct expr *set1 = 0;
2320 /* Loops are not possible here. To get a loop we would need two sets
2321 available at the start of the block containing INSN. i.e. we would
2322 need two sets like this available at the start of the block:
2324 (set (reg X) (reg Y))
2325 (set (reg Y) (reg X))
2327 This cannot happen since the set of (reg Y) would have killed the
2328 set of (reg X), making it unavailable at the start of this block. */
2329 while (1)
2331 rtx src;
2332 struct expr *set = lookup_set (regno, &set_hash_table);
2334 /* Find a set that is available at the start of the block
2335 which contains INSN. */
2336 while (set)
2338 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
2339 break;
2340 set = next_set (regno, set);
2343 /* If no available set was found we've reached the end of the
2344 (possibly empty) copy chain. */
2345 if (set == 0)
2346 break;
2348 gcc_assert (GET_CODE (set->expr) == SET);
2350 src = SET_SRC (set->expr);
2352 /* We know the set is available.
2353 Now check that SRC is ANTLOC (i.e. none of the source operands
2354 have changed since the start of the block).
2356 If the source operand changed, we may still use it for the next
2357 iteration of this loop, but we may not use it for substitutions. */
2359 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2360 set1 = set;
2362 /* If the source of the set is anything except a register, then
2363 we have reached the end of the copy chain. */
2364 if (! REG_P (src))
2365 break;
2367 /* Follow the copy chain, i.e. start another iteration of the loop
2368 and see if we have an available copy into SRC. */
2369 regno = REGNO (src);
2372 /* SET1 holds the last set that was available and anticipatable at
2373 INSN. */
2374 return set1;
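/* Worked example, not from the original file: suppose these sets are
   available at the start of INSN's block:
     (set (reg 10) (reg 11))
     (set (reg 11) (const_int 5))
   For REGNO == 10 the loop first finds the copy from reg 11, then
   follows the chain to reg 11 and finds the constant set.  If that set
   is also anticipatable, SET1 ends up pointing at
   (set (reg 11) (const_int 5)) and the caller may substitute 5 for
   uses of reg 10.  */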
2377 /* Subroutine of cprop_insn that tries to propagate constants into
2378 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2379 it is the instruction that immediately precedes JUMP, and must be a
2380 single SET of a register. FROM is what we will try to replace,
2381 SRC is the constant we will try to substitute for it. Returns nonzero
2382 if a change was made. */
2384 static int
2385 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2387 rtx new_rtx, set_src, note_src;
2388 rtx set = pc_set (jump);
2389 rtx note = find_reg_equal_equiv_note (jump);
2391 if (note)
2393 note_src = XEXP (note, 0);
2394 if (GET_CODE (note_src) == EXPR_LIST)
2395 note_src = NULL_RTX;
2397 else note_src = NULL_RTX;
2399 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2400 set_src = note_src ? note_src : SET_SRC (set);
2402 /* First substitute the SETCC condition into the JUMP instruction,
2403 then substitute the known values into this expanded JUMP. */
2404 if (setcc != NULL_RTX
2405 && !modified_between_p (from, setcc, jump)
2406 && !modified_between_p (src, setcc, jump))
2408 rtx setcc_src;
2409 rtx setcc_set = single_set (setcc);
2410 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2411 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2412 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2413 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2414 setcc_src);
2416 else
2417 setcc = NULL_RTX;
2419 new_rtx = simplify_replace_rtx (set_src, from, src);
2421 /* If no simplification can be made, then try the next register. */
2422 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2423 return 0;
2425 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
2426 if (new_rtx == pc_rtx)
2427 delete_insn (jump);
2428 else
2430 /* Ensure the value computed inside the jump insn is equivalent
2431 to the one computed by setcc. */
2432 if (setcc && modified_in_p (new_rtx, setcc))
2433 return 0;
2434 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2436 /* When (some) constants are not valid in a comparison, and there
2437 are two registers to be replaced by constants before the entire
2438 comparison can be folded into a constant, we need to keep
2439 intermediate information in REG_EQUAL notes. For targets with
2440 separate compare insns, such notes are added by try_replace_reg.
2441 When we have a combined compare-and-branch instruction, however,
2442 we need to attach a note to the branch itself to make this
2443 optimization work. */
2445 if (!rtx_equal_p (new_rtx, note_src))
2446 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2447 return 0;
2450 /* Remove REG_EQUAL note after simplification. */
2451 if (note_src)
2452 remove_note (jump, note);
2455 #ifdef HAVE_cc0
2456 /* Delete the cc0 setter. */
2457 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2458 delete_insn (setcc);
2459 #endif
2461 global_const_prop_count++;
2462 if (dump_file != NULL)
2464 fprintf (dump_file,
2465 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2466 REGNO (from), INSN_UID (jump));
2467 print_rtl (dump_file, src);
2468 fprintf (dump_file, "\n");
2470 purge_dead_edges (bb);
2472 /* If a conditional jump has been changed into an unconditional jump, remove
2473 the jump and make the edge fallthru - this is always called in
2474 cfglayout mode. */
2475 if (new_rtx != pc_rtx && simplejump_p (jump))
2477 edge e;
2478 edge_iterator ei;
2480 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2481 if (e->dest != EXIT_BLOCK_PTR
2482 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2484 e->flags |= EDGE_FALLTHRU;
2485 break;
2487 delete_insn (jump);
2490 return 1;
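/* Worked example, not from the original file: with SETCC
     (set (reg 100) (eq (reg 5) (const_int 0)))
   and JUMP
     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                             (label_ref 23) (pc)))
   propagating (const_int 0) for (reg 5) lets the combined condition
   fold to true, NEW_RTX becomes (label_ref 23), and the conditional
   jump is rewritten into an unconditional jump to label 23.  Had the
   condition folded to false, NEW_RTX would be pc_rtx and the jump
   would simply be deleted.  */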
2493 static bool
2494 constprop_register (rtx insn, rtx from, rtx to)
2496 rtx sset;
2498 /* Check for reg or cc0 setting instructions followed by
2499 conditional branch instructions first. */
2500 if ((sset = single_set (insn)) != NULL
2501 && NEXT_INSN (insn)
2502 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2504 rtx dest = SET_DEST (sset);
2505 if ((REG_P (dest) || CC0_P (dest))
2506 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2507 return 1;
2510 /* Handle normal insns next. */
2511 if (NONJUMP_INSN_P (insn)
2512 && try_replace_reg (from, to, insn))
2513 return 1;
2515 /* Try to propagate a CONST_INT into a conditional jump.
2516 We're pretty specific about what we will handle in this
2517 code; we can extend this as necessary over time.
2519 Right now the insn in question must look like
2520 (set (pc) (if_then_else ...)) */
2521 else if (any_condjump_p (insn) && onlyjump_p (insn))
2522 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2523 return 0;
2526 /* Perform constant and copy propagation on INSN.
2527 The result is nonzero if a change was made. */
2529 static int
2530 cprop_insn (rtx insn)
2532 struct reg_use *reg_used;
2533 int changed = 0;
2534 rtx note;
2536 if (!INSN_P (insn))
2537 return 0;
2539 reg_use_count = 0;
2540 note_uses (&PATTERN (insn), find_used_regs, NULL);
2542 note = find_reg_equal_equiv_note (insn);
2544 /* We may win even when propagating constants into notes. */
2545 if (note)
2546 find_used_regs (&XEXP (note, 0), NULL);
2548 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2549 reg_used++, reg_use_count--)
2551 unsigned int regno = REGNO (reg_used->reg_rtx);
2552 rtx pat, src;
2553 struct expr *set;
2555 /* If the register has already been set in this block, there's
2556 nothing we can do. */
2557 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2558 continue;
2560 /* Find an assignment that sets reg_used and is available
2561 at the start of the block. */
2562 set = find_avail_set (regno, insn);
2563 if (! set)
2564 continue;
2566 pat = set->expr;
2567 /* ??? We might be able to handle PARALLELs. Later. */
2568 gcc_assert (GET_CODE (pat) == SET);
2570 src = SET_SRC (pat);
2572 /* Constant propagation. */
2573 if (gcse_constant_p (src))
2575 if (constprop_register (insn, reg_used->reg_rtx, src))
2577 changed = 1;
2578 global_const_prop_count++;
2579 if (dump_file != NULL)
2581 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2582 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2583 print_rtl (dump_file, src);
2584 fprintf (dump_file, "\n");
2586 if (INSN_DELETED_P (insn))
2587 return 1;
2590 else if (REG_P (src)
2591 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2592 && REGNO (src) != regno)
2594 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2596 changed = 1;
2597 global_copy_prop_count++;
2598 if (dump_file != NULL)
2600 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2601 regno, INSN_UID (insn));
2602 fprintf (dump_file, " with reg %d\n", REGNO (src));
2605 /* The original insn setting reg_used may or may not now be
2606 deletable. We leave the deletion to flow. */
2607 /* FIXME: If it turns out that the insn isn't deletable,
2608 then we may have unnecessarily extended register lifetimes
2609 and made things worse. */
2614 if (changed && DEBUG_INSN_P (insn))
2615 return 0;
2617 return changed;
2620 /* Like find_used_regs, but avoid recording uses that appear in
2621 input-output contexts such as zero_extract or pre_dec. This
2622 restricts the cases we consider to those for which local cprop
2623 can legitimately make replacements. */
2625 static void
2626 local_cprop_find_used_regs (rtx *xptr, void *data)
2628 rtx x = *xptr;
2630 if (x == 0)
2631 return;
2633 switch (GET_CODE (x))
2635 case ZERO_EXTRACT:
2636 case SIGN_EXTRACT:
2637 case STRICT_LOW_PART:
2638 return;
2640 case PRE_DEC:
2641 case PRE_INC:
2642 case POST_DEC:
2643 case POST_INC:
2644 case PRE_MODIFY:
2645 case POST_MODIFY:
2646 /* Can only legitimately appear this early in the context of
2647 stack pushes for function arguments, but handle all of the
2648 codes nonetheless. */
2649 return;
2651 case SUBREG:
2652 /* Setting a subreg of a register larger than word_mode leaves
2653 the non-written words unchanged. */
2654 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2655 return;
2656 break;
2658 default:
2659 break;
2662 find_used_regs (xptr, data);
2665 /* Try to perform local const/copy propagation on X in INSN. */
2667 static bool
2668 do_local_cprop (rtx x, rtx insn)
2670 rtx newreg = NULL, newcnst = NULL;
2672 /* Rule out USE instructions and ASM statements as we don't want to
2673 change the hard registers mentioned. */
2674 if (REG_P (x)
2675 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2676 || (GET_CODE (PATTERN (insn)) != USE
2677 && asm_noperands (PATTERN (insn)) < 0)))
2679 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
2680 struct elt_loc_list *l;
2682 if (!val)
2683 return false;
2684 for (l = val->locs; l; l = l->next)
2686 rtx this_rtx = l->loc;
2687 rtx note;
2689 if (gcse_constant_p (this_rtx))
2690 newcnst = this_rtx;
2691 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
2692 /* Don't copy propagate if it has an attached REG_EQUIV note.
2693 At this point only function parameters should have
2694 REG_EQUIV notes, and if the argument slot is used somewhere
2695 explicitly, that means the address of the parameter has been
2696 taken, so we should not extend the lifetime of the pseudo. */
2697 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2698 || ! MEM_P (XEXP (note, 0))))
2699 newreg = this_rtx;
2701 if (newcnst && constprop_register (insn, x, newcnst))
2703 if (dump_file != NULL)
2705 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2706 REGNO (x));
2707 fprintf (dump_file, "insn %d with constant ",
2708 INSN_UID (insn));
2709 print_rtl (dump_file, newcnst);
2710 fprintf (dump_file, "\n");
2712 local_const_prop_count++;
2713 return true;
2715 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2717 if (dump_file != NULL)
2719 fprintf (dump_file,
2720 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2721 REGNO (x), INSN_UID (insn));
2722 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2724 local_copy_prop_count++;
2725 return true;
2728 return false;
2731 /* Do local const/copy propagation (i.e. within each basic block). */
2733 static int
2734 local_cprop_pass (void)
2736 basic_block bb;
2737 rtx insn;
2738 struct reg_use *reg_used;
2739 bool changed = false;
2741 cselib_init (false);
2742 FOR_EACH_BB (bb)
2744 FOR_BB_INSNS (bb, insn)
2746 if (INSN_P (insn))
2748 rtx note = find_reg_equal_equiv_note (insn);
2751 reg_use_count = 0;
2752 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2753 NULL);
2754 if (note)
2755 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2757 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2758 reg_used++, reg_use_count--)
2760 if (do_local_cprop (reg_used->reg_rtx, insn))
2762 changed = true;
2763 break;
2766 if (INSN_DELETED_P (insn))
2767 break;
2769 while (reg_use_count);
2771 cselib_process_insn (insn);
2774 /* Forget everything at the end of a basic block. */
2775 cselib_clear_table ();
2778 cselib_finish ();
2780 return changed;
2783 /* Similar to get_condition, only the resulting condition must be
2784 valid at JUMP, instead of at EARLIEST.
2786 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2787 settle for the condition variable in the jump instruction being integral.
2788 We prefer to be able to record the value of a user variable, rather than
2789 the value of a temporary used in a condition. This could be solved by
2790 recording the value of *every* register scanned by canonicalize_condition,
2791 but this would require some code reorganization. */
2793 static rtx
2794 fis_get_condition (rtx jump)
2796 return get_condition (jump, NULL, false, true);
2799 /* Check the comparison COND to see if we can safely form an implicit set from
2800 it. COND is either an EQ or NE comparison. */
2802 static bool
2803 implicit_set_cond_p (const_rtx cond)
2805 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2806 const_rtx cst = XEXP (cond, 1);
2808 /* We can't perform this optimization if either operand might be or might
2809 contain a signed zero. */
2810 if (HONOR_SIGNED_ZEROS (mode))
2812 /* It is sufficient to check if CST is or contains a zero. We must
2813 handle float, complex, and vector. If any subpart is a zero, then
2814 the optimization can't be performed. */
2815 /* ??? The complex and vector checks are not implemented yet. We just
2816 always return zero for them. */
2817 if (GET_CODE (cst) == CONST_DOUBLE)
2819 REAL_VALUE_TYPE d;
2820 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2821 if (REAL_VALUES_EQUAL (d, dconst0))
2822 return 0;
2824 else
2825 return 0;
2828 return gcse_constant_p (cst);
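/* Worked example, not from the original file: for IEEE floats,
   "if (x == 0.0)" does not imply x is +0.0 on the taken branch,
   because -0.0 == 0.0 also compares equal.  Substituting +0.0 for x
   could then change the sign of results such as 1.0 / x.  That is why
   a CONST_DOUBLE equal to dconst0 is rejected above whenever signed
   zeros are honored for the comparison mode.  */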
2831 /* Find the implicit sets of a function. An "implicit set" is a constraint
2832 on the value of a variable, implied by a conditional jump. For example,
2833 following "if (x == 2)", the then branch may be optimized as though the
2834 conditional performed an "explicit set", in this example, "x = 2". This
2835 function records the set patterns that are implicit at the start of each
2836 basic block.
2838 FIXME: This would be more effective if critical edges are pre-split. As
2839 it is now, we can't record implicit sets for blocks that have
2840 critical successor edges. This results in missed optimizations
2841 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
2843 static void
2844 find_implicit_sets (void)
2846 basic_block bb, dest;
2847 unsigned int count;
2848 rtx cond, new_rtx;
2850 count = 0;
2851 FOR_EACH_BB (bb)
2852 /* Check for more than one successor. */
2853 if (EDGE_COUNT (bb->succs) > 1)
2855 cond = fis_get_condition (BB_END (bb));
2857 if (cond
2858 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2859 && REG_P (XEXP (cond, 0))
2860 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2861 && implicit_set_cond_p (cond))
2863 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2864 : FALLTHRU_EDGE (bb)->dest;
2866 if (dest
2867 /* Record nothing for a critical edge. */
2868 && single_pred_p (dest)
2869 && dest != EXIT_BLOCK_PTR)
2871 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2872 XEXP (cond, 1));
2873 implicit_sets[dest->index] = new_rtx;
2874 if (dump_file)
2876 fprintf (dump_file, "Implicit set of reg %d in ",
2877 REGNO (XEXP (cond, 0)));
2878 fprintf (dump_file, "basic block %d\n", dest->index);
2880 count++;
2885 if (dump_file)
2886 fprintf (dump_file, "Found %d implicit sets\n", count);
2889 /* Bypass conditional jumps. */
2891 /* The value of last_basic_block at the beginning of the jump_bypass
2892 pass. The use of redirect_edge_and_branch_force may introduce new
2893 basic blocks, but the data flow analysis is only valid for basic
2894 block indices less than bypass_last_basic_block. */
2896 static int bypass_last_basic_block;
2898 /* Find a set of REGNO to a constant that is available at the end of basic
2899 block BB. Returns NULL if no such set is found. Based heavily upon
2900 find_avail_set. */
2902 static struct expr *
2903 find_bypass_set (int regno, int bb)
2905 struct expr *result = 0;
2907 for (;;)
2909 rtx src;
2910 struct expr *set = lookup_set (regno, &set_hash_table);
2912 while (set)
2914 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2915 break;
2916 set = next_set (regno, set);
2919 if (set == 0)
2920 break;
2922 gcc_assert (GET_CODE (set->expr) == SET);
2924 src = SET_SRC (set->expr);
2925 if (gcse_constant_p (src))
2926 result = set;
2928 if (! REG_P (src))
2929 break;
2931 regno = REGNO (src);
2933 return result;
2937 /* Subroutine of bypass_block that checks whether a pseudo is killed by
2938 any of the instructions inserted on an edge. Jump bypassing places
2939 condition code setters on CFG edges using insert_insn_on_edge. This
2940 function is required to check that our data flow analysis is still
2941 valid prior to commit_edge_insertions. */
2943 static bool
2944 reg_killed_on_edge (const_rtx reg, const_edge e)
2946 rtx insn;
2948 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
2949 if (INSN_P (insn) && reg_set_p (reg, insn))
2950 return true;
2952 return false;
2955 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
2956 basic block BB which has more than one predecessor. If not NULL, SETCC
2957 is the first instruction of BB, which is immediately followed by JUMP_INSN
2958 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
2959 Returns nonzero if a change was made.
2961 During the jump bypassing pass, we may place copies of SETCC instructions
2962 on CFG edges. The following routine must be careful to pay attention to
2963 these inserted insns when performing its transformations. */
2965 static int
2966 bypass_block (basic_block bb, rtx setcc, rtx jump)
2968 rtx insn, note;
2969 edge e, edest;
2970 int i, change;
2971 int may_be_loop_header;
2972 unsigned removed_p;
2973 edge_iterator ei;
2975 insn = (setcc != NULL) ? setcc : jump;
2977 /* Determine set of register uses in INSN. */
2978 reg_use_count = 0;
2979 note_uses (&PATTERN (insn), find_used_regs, NULL);
2980 note = find_reg_equal_equiv_note (insn);
2981 if (note)
2982 find_used_regs (&XEXP (note, 0), NULL);
2984 may_be_loop_header = false;
2985 FOR_EACH_EDGE (e, ei, bb->preds)
2986 if (e->flags & EDGE_DFS_BACK)
2988 may_be_loop_header = true;
2989 break;
2992 change = 0;
2993 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
2995 removed_p = 0;
2997 if (e->flags & EDGE_COMPLEX)
2999 ei_next (&ei);
3000 continue;
3003 /* We can't redirect edges from new basic blocks. */
3004 if (e->src->index >= bypass_last_basic_block)
3006 ei_next (&ei);
3007 continue;
3010 /* The irreducible loops created by redirecting edges entering the
3011 loop from outside would decrease the effectiveness of some of the
3012 following optimizations, so prevent this. */
3013 if (may_be_loop_header
3014 && !(e->flags & EDGE_DFS_BACK))
3016 ei_next (&ei);
3017 continue;
3020 for (i = 0; i < reg_use_count; i++)
3022 struct reg_use *reg_used = &reg_use_table[i];
3023 unsigned int regno = REGNO (reg_used->reg_rtx);
3024 basic_block dest, old_dest;
3025 struct expr *set;
3026 rtx src, new_rtx;
3028 set = find_bypass_set (regno, e->src->index);
3030 if (! set)
3031 continue;
3033 /* Check the data flow is valid after edge insertions. */
3034 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3035 continue;
3037 src = SET_SRC (pc_set (jump));
3039 if (setcc != NULL)
3040 src = simplify_replace_rtx (src,
3041 SET_DEST (PATTERN (setcc)),
3042 SET_SRC (PATTERN (setcc)));
3044 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3045 SET_SRC (set->expr));
3047 /* Jump bypassing may have already placed instructions on
3048 edges of the CFG. We can't bypass an outgoing edge that
3049 has instructions associated with it, as these insns won't
3050 get executed if the incoming edge is redirected. */
3052 if (new_rtx == pc_rtx)
3054 edest = FALLTHRU_EDGE (bb);
3055 dest = edest->insns.r ? NULL : edest->dest;
3057 else if (GET_CODE (new_rtx) == LABEL_REF)
3059 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3060 /* Don't bypass edges containing instructions. */
3061 edest = find_edge (bb, dest);
3062 if (edest && edest->insns.r)
3063 dest = NULL;
3065 else
3066 dest = NULL;
3068 /* Avoid unification of the edge with other edges from original
3069 branch. We would end up emitting the instruction on "both"
3070 edges. */
3072 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3073 && find_edge (e->src, dest))
3074 dest = NULL;
3076 old_dest = e->dest;
3077 if (dest != NULL
3078 && dest != old_dest
3079 && dest != EXIT_BLOCK_PTR)
3081 redirect_edge_and_branch_force (e, dest);
3083 /* Copy the register setter to the redirected edge.
3084 Don't copy CC0 setters, as CC0 is dead after jump. */
3085 if (setcc)
3087 rtx pat = PATTERN (setcc);
3088 if (!CC0_P (SET_DEST (pat)))
3089 insert_insn_on_edge (copy_insn (pat), e);
3092 if (dump_file != NULL)
3094 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3095 "in jump_insn %d equals constant ",
3096 regno, INSN_UID (jump));
3097 print_rtl (dump_file, SET_SRC (set->expr));
3098 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3099 e->src->index, old_dest->index, dest->index);
3101 change = 1;
3102 removed_p = 1;
3103 break;
3106 if (!removed_p)
3107 ei_next (&ei);
3109 return change;
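/* Worked example, not from the original file: suppose BB consists only
   of the conditional jump
     (set (pc) (if_then_else (eq (reg 5) (const_int 0))
                             (label_ref 30) (pc)))
   and on incoming edge E the set (set (reg 5) (const_int 0)) is
   available at the end of E->src.  The substitution above folds the
   condition to true, NEW_RTX becomes (label_ref 30), and E is
   redirected straight to label 30's block, bypassing the conditional
   jump entirely.  */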
3112 /* Find basic blocks with more than one predecessor that only contain a
3113 single conditional jump. If the result of the comparison is known at
3114 compile-time from any incoming edge, redirect that edge to the
3115 appropriate target. Returns nonzero if a change was made.
3117 This function is now mis-named, because we also handle indirect jumps. */
3119 static int
3120 bypass_conditional_jumps (void)
3122 basic_block bb;
3123 int changed;
3124 rtx setcc;
3125 rtx insn;
3126 rtx dest;
3128 /* Note we start at block 1. */
3129 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3130 return 0;
3132 bypass_last_basic_block = last_basic_block;
3133 mark_dfs_back_edges ();
3135 changed = 0;
3136 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3137 EXIT_BLOCK_PTR, next_bb)
3139 /* Check for more than one predecessor. */
3140 if (!single_pred_p (bb))
3142 setcc = NULL_RTX;
3143 FOR_BB_INSNS (bb, insn)
3144 if (DEBUG_INSN_P (insn))
3145 continue;
3146 else if (NONJUMP_INSN_P (insn))
3148 if (setcc)
3149 break;
3150 if (GET_CODE (PATTERN (insn)) != SET)
3151 break;
3153 dest = SET_DEST (PATTERN (insn));
3154 if (REG_P (dest) || CC0_P (dest))
3155 setcc = insn;
3156 else
3157 break;
3159 else if (JUMP_P (insn))
3161 if ((any_condjump_p (insn) || computed_jump_p (insn))
3162 && onlyjump_p (insn))
3163 changed |= bypass_block (bb, setcc, insn);
3164 break;
3166 else if (INSN_P (insn))
3167 break;
3171 /* If we bypassed any register setting insns, we inserted a
3172 copy on the redirected edge. These need to be committed. */
3173 if (changed)
3174 commit_edge_insertions ();
3176 return changed;
3179 /* Compute PRE+LCM working variables. */
3181 /* Local properties of expressions. */
3182 /* Nonzero for expressions that are transparent in the block. */
3183 static sbitmap *transp;
3185 /* Nonzero for expressions that are transparent at the end of the block.
3186 This is only zero for expressions killed by an abnormal critical edge
3187 created by a call. */
3188 static sbitmap *transpout;
3190 /* Nonzero for expressions that are computed (available) in the block. */
3191 static sbitmap *comp;
3193 /* Nonzero for expressions that are locally anticipatable in the block. */
3194 static sbitmap *antloc;
3196 /* Nonzero for expressions where this block is an optimal computation
3197 point. */
3198 static sbitmap *pre_optimal;
3200 /* Nonzero for expressions which are redundant in a particular block. */
3201 static sbitmap *pre_redundant;
3203 /* Nonzero for expressions which should be inserted on a specific edge. */
3204 static sbitmap *pre_insert_map;
3206 /* Nonzero for expressions which should be deleted in a specific block. */
3207 static sbitmap *pre_delete_map;
3209 /* Contains the edge_list returned by pre_edge_lcm. */
3210 static struct edge_list *edge_list;
3212 /* Allocate vars used for PRE analysis. */
3214 static void
3215 alloc_pre_mem (int n_blocks, int n_exprs)
3217 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3218 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3219 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3221 pre_optimal = NULL;
3222 pre_redundant = NULL;
3223 pre_insert_map = NULL;
3224 pre_delete_map = NULL;
3225 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3227 /* pre_insert and pre_delete are allocated later. */
3230 /* Free vars used for PRE analysis. */
3232 static void
3233 free_pre_mem (void)
3235 sbitmap_vector_free (transp);
3236 sbitmap_vector_free (comp);
3238 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3240 if (pre_optimal)
3241 sbitmap_vector_free (pre_optimal);
3242 if (pre_redundant)
3243 sbitmap_vector_free (pre_redundant);
3244 if (pre_insert_map)
3245 sbitmap_vector_free (pre_insert_map);
3246 if (pre_delete_map)
3247 sbitmap_vector_free (pre_delete_map);
3249 transp = comp = NULL;
3250 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3253 /* Top level routine to do the dataflow analysis needed by PRE. */
3255 static void
3256 compute_pre_data (void)
3258 sbitmap trapping_expr;
3259 basic_block bb;
3260 unsigned int ui;
3262 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3263 sbitmap_vector_zero (ae_kill, last_basic_block);
3265 /* Collect expressions which might trap. */
3266 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3267 sbitmap_zero (trapping_expr);
3268 for (ui = 0; ui < expr_hash_table.size; ui++)
3270 struct expr *e;
3271 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3272 if (may_trap_p (e->expr))
3273 SET_BIT (trapping_expr, e->bitmap_index);
3276 /* Compute ae_kill for each basic block using:
3278 ~(TRANSP | COMP)
3281 FOR_EACH_BB (bb)
3283 edge e;
3284 edge_iterator ei;
3286 /* If the current block is the destination of an abnormal edge, we
3287 kill all trapping expressions because we won't be able to properly
3288 place the instruction on the edge. So make them neither
3289 anticipatable nor transparent. This is fairly conservative. */
3290 FOR_EACH_EDGE (e, ei, bb->preds)
3291 if (e->flags & EDGE_ABNORMAL)
3293 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3294 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3295 break;
3298 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3299 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3302 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3303 ae_kill, &pre_insert_map, &pre_delete_map);
3304 sbitmap_vector_free (antloc);
3305 antloc = NULL;
3306 sbitmap_vector_free (ae_kill);
3307 ae_kill = NULL;
3308 sbitmap_free (trapping_expr);
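/* Worked example, not from the original file: ae_kill = ~(transp | comp)
   means an expression is killed in a block only if the block neither
   leaves it untouched (transparent) nor recomputes it.  A block that
   clobbers an operand of "a + b" without re-evaluating a + b afterwards
   has transp = 0 and comp = 0 for that expression, hence ae_kill = 1;
   a block that recomputes it after the clobber has comp = 1, hence
   ae_kill = 0.  */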
3311 /* PRE utilities */
3313 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3314 block BB.
3316 VISITED is a pointer to a working buffer for tracking which BB's have
3317 been visited. It is NULL for the top-level call.
3319 We treat reaching expressions that go through blocks containing the same
3320 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3321 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3322 2 as not reaching. The intent is to improve the probability of finding
3323 only one reaching expression and to reduce register lifetimes by picking
3324 the closest such expression. */
3326 static int
3327 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3329 edge pred;
3330 edge_iterator ei;
3332 FOR_EACH_EDGE (pred, ei, bb->preds)
3334 basic_block pred_bb = pred->src;
3336 if (pred->src == ENTRY_BLOCK_PTR
3337 /* Has this predecessor already been visited? */
3338 || visited[pred_bb->index])
3339 ;/* Nothing to do. */
3341 /* Does this predecessor generate this expression? */
3342 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3344 /* Is this the occurrence we're looking for?
3345 Note that there's only one generating occurrence per block
3346 so we just need to check the block number. */
3347 if (occr_bb == pred_bb)
3348 return 1;
3350 visited[pred_bb->index] = 1;
3352 /* Ignore this predecessor if it kills the expression. */
3353 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3354 visited[pred_bb->index] = 1;
3356 /* Neither gen nor kill. */
3357 else
3359 visited[pred_bb->index] = 1;
3360 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3361 return 1;
3365 /* All paths have been checked. */
3366 return 0;
3369 /* The wrapper for pre_expr_reaches_here_work that ensures that any
3370 memory allocated for that function is returned. */
3372 static int
3373 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3375 int rval;
3376 char *visited = XCNEWVEC (char, last_basic_block);
3378 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3380 free (visited);
3381 return rval;
3385 /* Given an expr, generate RTL which we can insert at the end of a BB,
3386 or on an edge. Set the block number of any insns generated to
3387 the value of BB. */
3389 static rtx
3390 process_insert_insn (struct expr *expr)
3392 rtx reg = expr->reaching_reg;
3393 rtx exp = copy_rtx (expr->expr);
3394 rtx pat;
3396 start_sequence ();
3398 /* If the expression is something that's an operand, like a constant,
3399 just copy it to a register. */
3400 if (general_operand (exp, GET_MODE (reg)))
3401 emit_move_insn (reg, exp);
3403 /* Otherwise, make a new insn to compute this expression and make sure the
3404 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3405 expression to make sure we don't have any sharing issues. */
3406 else
3408 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3410 if (insn_invalid_p (insn))
3411 gcc_unreachable ();
3415 pat = get_insns ();
3416 end_sequence ();
3418 return pat;
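/* Worked example, not from the original file: for an expression
   (plus (reg 5) (reg 6)) with reaching register (reg 120), the emitted
   sequence is the single recognized insn
     (set (reg 120) (plus (reg 5) (reg 6)))
   whereas a bare operand such as (const_int 42) passes general_operand
   and becomes a plain move into (reg 120) instead.  */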
3421 /* Add EXPR to the end of basic block BB.
3423 This is used by both the PRE and code hoisting.
3425 For PRE, we want to verify that the expr is either transparent
3426 or locally anticipatable in the target block. This check makes
3427 no sense for code hoisting. */
3429 static void
3430 insert_insn_end_basic_block (struct expr *expr, basic_block bb, int pre)
3432 rtx insn = BB_END (bb);
3433 rtx new_insn;
3434 rtx reg = expr->reaching_reg;
3435 int regno = REGNO (reg);
3436 rtx pat, pat_end;
3438 pat = process_insert_insn (expr);
3439 gcc_assert (pat && INSN_P (pat));
3441 pat_end = pat;
3442 while (NEXT_INSN (pat_end) != NULL_RTX)
3443 pat_end = NEXT_INSN (pat_end);
3445 /* If the last insn is a jump, insert EXPR in front [taking care to
3446 handle cc0, etc. properly]. Similarly we need to take care of trapping
3447 instructions in the presence of non-call exceptions. */
3449 if (JUMP_P (insn)
3450 || (NONJUMP_INSN_P (insn)
3451 && (!single_succ_p (bb)
3452 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3454 #ifdef HAVE_cc0
3455 rtx note;
3456 #endif
3457 /* It should always be the case that we can put these instructions
3458 anywhere in the basic block when performing PRE optimizations.
3459 Check this. */
3460 gcc_assert (!NONJUMP_INSN_P (insn) || !pre
3461 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3462 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3464 /* If this is a jump table, then we can't insert stuff here. Since
3465 we know the previous real insn must be the tablejump, we insert
3466 the new instruction just before the tablejump. */
3467 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3468 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3469 insn = prev_real_insn (insn);
3471 #ifdef HAVE_cc0
3472 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3473 if cc0 isn't set. */
3474 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3475 if (note)
3476 insn = XEXP (note, 0);
3477 else
3479 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3480 if (maybe_cc0_setter
3481 && INSN_P (maybe_cc0_setter)
3482 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3483 insn = maybe_cc0_setter;
3485 #endif
3486 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3487 new_insn = emit_insn_before_noloc (pat, insn, bb);
3490 /* Likewise if the last insn is a call, as will happen in the presence
3491 of exception handling. */
3492 else if (CALL_P (insn)
3493 && (!single_succ_p (bb)
3494 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3496 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
3497 we search backward and place the instructions before the first
3498 parameter is loaded. Do this for everyone for consistency, on the
3499 presumption that we'll get better code elsewhere as well.
3501 It should always be the case that we can put these instructions
3502 anywhere in the basic block when performing PRE optimizations.
3503 Check this. */
3505 gcc_assert (!pre
3506 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3507 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3509 /* Since different machines initialize their parameter registers
3510 in different orders, assume nothing. Collect the set of all
3511 parameter registers. */
3512 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3514 /* If we found all the parameter loads, then we want to insert
3515 before the first parameter load.
3517 If we did not find all the parameter loads, then we might have
3518 stopped on the head of the block, which could be a CODE_LABEL.
3519 If we inserted before the CODE_LABEL, then we would be putting
3520 the insn in the wrong basic block. In that case, put the insn
3521 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3522 while (LABEL_P (insn)
3523 || NOTE_INSN_BASIC_BLOCK_P (insn))
3524 insn = NEXT_INSN (insn);
3526 new_insn = emit_insn_before_noloc (pat, insn, bb);
3528 else
3529 new_insn = emit_insn_after_noloc (pat, insn, bb);
3531 while (1)
3533 if (INSN_P (pat))
3534 add_label_notes (PATTERN (pat), new_insn);
3535 if (pat == pat_end)
3536 break;
3537 pat = NEXT_INSN (pat);
3540 gcse_create_count++;
3542 if (dump_file)
3544 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3545 bb->index, INSN_UID (new_insn));
3546 fprintf (dump_file, "copying expression %d to reg %d\n",
3547 expr->bitmap_index, regno);
3551 /* Insert partially redundant expressions on edges in the CFG to make
3552 the expressions fully redundant. */
3554 static int
3555 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3557 int e, i, j, num_edges, set_size, did_insert = 0;
3558 sbitmap *inserted;
3560 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3561 if it reaches any of the deleted expressions. */
3563 set_size = pre_insert_map[0]->size;
3564 num_edges = NUM_EDGES (edge_list);
3565 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3566 sbitmap_vector_zero (inserted, num_edges);
3568 for (e = 0; e < num_edges; e++)
3570 int indx;
3571 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3573 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3575 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3577 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3578 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3580 struct expr *expr = index_map[j];
3581 struct occr *occr;
3583 /* Now look at each deleted occurrence of this expression. */
3584 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3586 if (! occr->deleted_p)
3587 continue;
3589 /* Insert this expression on this edge if it would
3590 reach the deleted occurrence in BB. */
3591 if (!TEST_BIT (inserted[e], j))
3593 rtx insn;
3594 edge eg = INDEX_EDGE (edge_list, e);
3596 /* We can't insert anything on an abnormal and
3597 critical edge, so we insert the insn at the end of
3598 the previous block. There are several alternatives
3599 detailed in Morgan's book, p. 277 (sec. 10.5), for
3600 handling this situation. This one is easiest for
3601 now. */
3603 if (eg->flags & EDGE_ABNORMAL)
3604 insert_insn_end_basic_block (index_map[j], bb, 0);
3605 else
3607 insn = process_insert_insn (index_map[j]);
3608 insert_insn_on_edge (insn, eg);
3611 if (dump_file)
3613 fprintf (dump_file, "PRE: edge (%d,%d), ",
3614 bb->index,
3615 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3616 fprintf (dump_file, "copy expression %d\n",
3617 expr->bitmap_index);
3620 update_ld_motion_stores (expr);
3621 SET_BIT (inserted[e], j);
3622 did_insert = 1;
3623 gcse_create_count++;
3630 sbitmap_vector_free (inserted);
3631 return did_insert;
3634 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3635 Given "old_reg <- expr" (INSN), instead of adding after it
3636 reaching_reg <- old_reg
3637 it's better to do the following:
3638 reaching_reg <- expr
3639 old_reg <- reaching_reg
3640 because this way copy propagation can discover additional PRE
3641 opportunities. But if this fails, we try the old way.
3642 When "expr" is a store, i.e.
3643 given "MEM <- old_reg", instead of adding after it
3644 reaching_reg <- old_reg
3645 it's better to add it before as follows:
3646 reaching_reg <- old_reg
3647 MEM <- reaching_reg. */
3649 static void
3650 pre_insert_copy_insn (struct expr *expr, rtx insn)
3652 rtx reg = expr->reaching_reg;
3653 int regno = REGNO (reg);
3654 int indx = expr->bitmap_index;
3655 rtx pat = PATTERN (insn);
3656 rtx set, first_set, new_insn;
3657 rtx old_reg;
3658 int i;
3660 /* This block matches the logic in hash_scan_insn. */
3661 switch (GET_CODE (pat))
3663 case SET:
3664 set = pat;
3665 break;
3667 case PARALLEL:
3668 /* Search through the parallel looking for the set whose
3669 source was the expression that we're interested in. */
3670 first_set = NULL_RTX;
3671 set = NULL_RTX;
3672 for (i = 0; i < XVECLEN (pat, 0); i++)
3674 rtx x = XVECEXP (pat, 0, i);
3675 if (GET_CODE (x) == SET)
3677 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3678 may not find an equivalent expression, but in this
3679 case the PARALLEL will have a single set. */
3680 if (first_set == NULL_RTX)
3681 first_set = x;
3682 if (expr_equiv_p (SET_SRC (x), expr->expr))
3684 set = x;
3685 break;
3690 gcc_assert (first_set);
3691 if (set == NULL_RTX)
3692 set = first_set;
3693 break;
3695 default:
3696 gcc_unreachable ();
3699 if (REG_P (SET_DEST (set)))
3701 old_reg = SET_DEST (set);
3702 /* Check if we can modify the set destination in the original insn. */
3703 if (validate_change (insn, &SET_DEST (set), reg, 0))
3705 new_insn = gen_move_insn (old_reg, reg);
3706 new_insn = emit_insn_after (new_insn, insn);
3708 else
3710 new_insn = gen_move_insn (reg, old_reg);
3711 new_insn = emit_insn_after (new_insn, insn);
3714 else /* This is possible only in case of a store to memory. */
3716 old_reg = SET_SRC (set);
3717 new_insn = gen_move_insn (reg, old_reg);
3719 /* Check if we can modify the set source in the original insn. */
3720 if (validate_change (insn, &SET_SRC (set), reg, 0))
3721 new_insn = emit_insn_before (new_insn, insn);
3722 else
3723 new_insn = emit_insn_after (new_insn, insn);
3726 gcse_create_count++;
3728 if (dump_file)
3729 fprintf (dump_file,
3730 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3731 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
3732 INSN_UID (insn), regno);
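/* Worked example, not from the original file: given
     (set (reg 70) (plus (reg 5) (reg 6)))
   and reaching register (reg 120), rewriting the destination yields
     (set (reg 120) (plus (reg 5) (reg 6)))
     (set (reg 70) (reg 120))
   so later copy propagation sees the expression in reg 120 directly.
   For a store "(set (mem ...) (reg 70))" the copy
     (set (reg 120) (reg 70))
   is placed before the store instead, per the comment above.  */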
3735 /* Copy available expressions that reach the redundant expression
3736 to `reaching_reg'. */
3738 static void
3739 pre_insert_copies (void)
3741 unsigned int i, added_copy;
3742 struct expr *expr;
3743 struct occr *occr;
3744 struct occr *avail;
3746 /* For each available expression in the table, copy the result to
3747 `reaching_reg' if the expression reaches a deleted one.
3749 ??? The current algorithm is rather brute force.
3750 Need to do some profiling. */
3752 for (i = 0; i < expr_hash_table.size; i++)
3753 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3755 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3756 we don't want to insert a copy here because the expression may not
3757 really be redundant. So only insert an insn if the expression was
3758 deleted. This test also avoids further processing if the
3759 expression wasn't deleted anywhere. */
3760 if (expr->reaching_reg == NULL)
3761 continue;
3763 /* Set when we add a copy for that expression. */
3764 added_copy = 0;
3766 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3768 if (! occr->deleted_p)
3769 continue;
3771 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3773 rtx insn = avail->insn;
3775 /* No need to handle this one if handled already. */
3776 if (avail->copied_p)
3777 continue;
3779 /* Don't handle this one if it's a redundant one. */
3780 if (INSN_DELETED_P (insn))
3781 continue;
3783 /* Or if the expression doesn't reach the deleted one. */
3784 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3785 expr,
3786 BLOCK_FOR_INSN (occr->insn)))
3787 continue;
3789 added_copy = 1;
3791 /* Copy the result of avail to reaching_reg. */
3792 pre_insert_copy_insn (expr, insn);
3793 avail->copied_p = 1;
3797 if (added_copy)
3798 update_ld_motion_stores (expr);
3802 /* Emit move from SRC to DEST noting the equivalence with expression computed
3803 in INSN. */
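/* A hypothetical sketch: if INSN is
   (set (reg 5) (plus (reg 1) (reg 2))), DEST is (reg 5) and SRC is a
   reaching register (reg 100), the copy emitted below is
   (set (reg 5) (reg 100)) carrying a REG_EQUAL note for
   (plus (reg 1) (reg 2)) (or for an existing REG_EQUAL/REG_EQUIV note
   on INSN), so local CSE can still see the value being copied.  */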
3804 static rtx
3805 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3807 rtx new_rtx;
3808 rtx set = single_set (insn), set2;
3809 rtx note;
3810 rtx eqv;
3812 /* This should never fail since we're creating a reg->reg copy
3813 we've verified to be valid. */
3815 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3817 /* Note the equivalence for local CSE pass. */
3818 set2 = single_set (new_rtx);
3819 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3820 return new_rtx;
3821 if ((note = find_reg_equal_equiv_note (insn)))
3822 eqv = XEXP (note, 0);
3823 else
3824 eqv = SET_SRC (set);
3826 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
3828 return new_rtx;
3831 /* Delete redundant computations.
3832 Deletion is done by changing the insn to copy the `reaching_reg' of
3833 the expression into the result of the SET. It is left to later passes
3834 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
3836 Returns nonzero if a change is made. */
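/* Concretely (register numbers invented for illustration): a redundant
   (set (reg 5) (plus (reg 1) (reg 2))) is replaced by the copy
   (set (reg 5) (reg 100)), where (reg 100) is expr->reaching_reg, and
   the original arithmetic insn is deleted.  */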
3838 static int
3839 pre_delete (void)
3841 unsigned int i;
3842 int changed;
3843 struct expr *expr;
3844 struct occr *occr;
3846 changed = 0;
3847 for (i = 0; i < expr_hash_table.size; i++)
3848 for (expr = expr_hash_table.table[i];
3849 expr != NULL;
3850 expr = expr->next_same_hash)
3852 int indx = expr->bitmap_index;
3854 /* We only need to search antic_occr since we require
3855 ANTLOC != 0. */
3857 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3859 rtx insn = occr->insn;
3860 rtx set;
3861 basic_block bb = BLOCK_FOR_INSN (insn);
3863 /* We only delete insns that have a single_set. */
3864 if (TEST_BIT (pre_delete_map[bb->index], indx)
3865 && (set = single_set (insn)) != 0
3866 && dbg_cnt (pre_insn))
3868 /* Create a pseudo-reg to store the result of reaching
3869 expressions into. Get the mode for the new pseudo from
3870 the mode of the original destination pseudo. */
3871 if (expr->reaching_reg == NULL)
3872 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
3874 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
3875 delete_insn (insn);
3876 occr->deleted_p = 1;
3877 changed = 1;
3878 gcse_subst_count++;
3880 if (dump_file)
3882 fprintf (dump_file,
3883 "PRE: redundant insn %d (expression %d) in ",
3884 INSN_UID (insn), indx);
3885 fprintf (dump_file, "bb %d, reaching reg is %d\n",
3886 bb->index, REGNO (expr->reaching_reg));
3892 return changed;
3895 /* Perform GCSE optimizations using PRE.
3896 This is called by one_pre_gcse_pass after all the dataflow analysis
3897 has been done.
3899 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
3900 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
3901 Compiler Design and Implementation.
3903 ??? A new pseudo reg is created to hold the reaching expression. The nice
3904 thing about the classical approach is that it would try to use an existing
3905 reg. If the register can't be adequately optimized [i.e. we introduce
3906 reload problems], one could add a pass here to propagate the new register
3907 through the block.
3909 ??? We don't handle single sets in PARALLELs because we're [currently] not
3910 able to copy the rest of the parallel when we insert copies to create full
3911 redundancies from partial redundancies. However, there's no reason why we
3912 can't handle PARALLELs in the cases where there are no partial
3913 redundancies. */
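/* The net effect, sketched at the source level (names invented):

       if (cond) x = a + b;  ...  y = a + b;

   becomes, after deleting the fully redundant computation and
   inserting on the edges where the expression was missing,

       if (cond) { x = a + b; t = x; } else t = a + b;  ...  y = t;

   where t stands for the new reaching_reg pseudo.  */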
3915 static int
3916 pre_gcse (void)
3918 unsigned int i;
3919 int did_insert, changed;
3920 struct expr **index_map;
3921 struct expr *expr;
3923 /* Compute a mapping from expression number (`bitmap_index') to
3924 hash table entry. */
3926 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3927 for (i = 0; i < expr_hash_table.size; i++)
3928 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3929 index_map[expr->bitmap_index] = expr;
3931 /* Delete the redundant insns first so that
3932 - we know what register to use for the new insns and for the other
3933 ones with reaching expressions
3934 - we know which insns are redundant when we go to create copies */
3936 changed = pre_delete ();
3937 did_insert = pre_edge_insert (edge_list, index_map);
3939 /* In other places with reaching expressions, copy the expression to the
3940 specially allocated pseudo-reg that reaches the redundant expr. */
3941 pre_insert_copies ();
3942 if (did_insert)
3944 commit_edge_insertions ();
3945 changed = 1;
3948 free (index_map);
3949 return changed;
3952 /* Top level routine to perform one PRE GCSE pass.
3954 Return nonzero if a change was made. */
3956 static int
3957 one_pre_gcse_pass (void)
3959 int changed = 0;
3961 gcse_subst_count = 0;
3962 gcse_create_count = 0;
3964 /* Return if there's nothing to do, or it is too expensive. */
3965 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3966 || is_too_expensive (_("PRE disabled")))
3967 return 0;
3969 /* We need alias. */
3970 init_alias_analysis ();
3972 bytes_used = 0;
3973 gcc_obstack_init (&gcse_obstack);
3974 alloc_gcse_mem ();
3976 alloc_hash_table (&expr_hash_table, 0);
3977 add_noreturn_fake_exit_edges ();
3978 if (flag_gcse_lm)
3979 compute_ld_motion_mems ();
3981 compute_hash_table (&expr_hash_table);
3982 trim_ld_motion_mems ();
3983 if (dump_file)
3984 dump_hash_table (dump_file, "Expression", &expr_hash_table);
3986 if (expr_hash_table.n_elems > 0)
3988 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
3989 compute_pre_data ();
3990 changed |= pre_gcse ();
3991 free_edge_list (edge_list);
3992 free_pre_mem ();
3995 free_ldst_mems ();
3996 remove_fake_exit_edges ();
3997 free_hash_table (&expr_hash_table);
3999 free_gcse_mem ();
4000 obstack_free (&gcse_obstack, NULL);
4002 /* We are finished with alias. */
4003 end_alias_analysis ();
4005 if (dump_file)
4007 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4008 current_function_name (), n_basic_blocks, bytes_used);
4009 fprintf (dump_file, "%d substs, %d insns created\n",
4010 gcse_subst_count, gcse_create_count);
4013 return changed;
4016 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4017 to INSN. If such notes are added to an insn which references a
4018 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4019 these notes, because the following loop optimization pass requires
4020 them. */
4022 /* ??? If there was a jump optimization pass after gcse and before loop,
4023 then we would not need to do this here, because jump would add the
4024 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4026 static void
4027 add_label_notes (rtx x, rtx insn)
4029 enum rtx_code code = GET_CODE (x);
4030 int i, j;
4031 const char *fmt;
4033 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4035 /* This code used to ignore labels that referred to dispatch tables to
4036 avoid flow generating (slightly) worse code.
4038 We no longer ignore such label references (see LABEL_REF handling in
4039 mark_jump_label for additional information). */
4041 /* There's no reason for current users to emit jump-insns with
4042 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
4043 notes. */
4044 gcc_assert (!JUMP_P (insn));
4045 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4047 if (LABEL_P (XEXP (x, 0)))
4048 LABEL_NUSES (XEXP (x, 0))++;
4050 return;
4053 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4055 if (fmt[i] == 'e')
4056 add_label_notes (XEXP (x, i), insn);
4057 else if (fmt[i] == 'E')
4058 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4059 add_label_notes (XVECEXP (x, i, j), insn);
4063 /* Compute transparent outgoing information for each block.
4065 An expression is transparent to an edge unless it is killed by
4066 the edge itself. This can only happen with abnormal control flow,
4067 when the edge is traversed through a call. This happens with
4068 non-local labels and exceptions.
4070 This would not be necessary if we split the edge. While this is
4071 normally impossible for abnormal critical edges, with some effort
4072 it should be possible with exception handling, since we still have
4073 control over which handler should be invoked. But due to increased
4074 EH table sizes, this may not be worthwhile. */
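/* An illustrative case: when a block ends in a call, its abnormal
   outgoing edges (non-local gotos, exceptions) are traversed from
   inside the callee, which may already have modified memory, so no
   MEM expression can be considered transparent across those edges.
   The one exception made below is a reference to the constant pool,
   whose contents never change.  */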
4076 static void
4077 compute_transpout (void)
4079 basic_block bb;
4080 unsigned int i;
4081 struct expr *expr;
4083 sbitmap_vector_ones (transpout, last_basic_block);
4085 FOR_EACH_BB (bb)
4087 /* Note that flow inserted a nop at the end of basic blocks that
4088 end in call instructions for reasons other than abnormal
4089 control flow. */
4090 if (! CALL_P (BB_END (bb)))
4091 continue;
4093 for (i = 0; i < expr_hash_table.size; i++)
4094 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4095 if (MEM_P (expr->expr))
4097 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4098 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4099 continue;
4101 /* ??? Optimally, we would use interprocedural alias
4102 analysis to determine if this mem is actually killed
4103 by this call. */
4104 RESET_BIT (transpout[bb->index], expr->bitmap_index);
4109 /* Code Hoisting variables and subroutines. */
4111 /* Very busy expressions. */
4112 static sbitmap *hoist_vbein;
4113 static sbitmap *hoist_vbeout;
4115 /* Hoistable expressions. */
4116 static sbitmap *hoist_exprs;
4118 /* ??? We could compute post dominators and run this algorithm in
4119 reverse to perform tail merging; doing so would probably be
4120 more effective than the tail merging code in jump.c.
4122 It's unclear if tail merging could be run in parallel with
4123 code hoisting. It would be nice. */
4125 /* Allocate vars used for code hoisting analysis. */
4127 static void
4128 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4130 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4131 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4132 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4134 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4135 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4136 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4137 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4140 /* Free vars used for code hoisting analysis. */
4142 static void
4143 free_code_hoist_mem (void)
4145 sbitmap_vector_free (antloc);
4146 sbitmap_vector_free (transp);
4147 sbitmap_vector_free (comp);
4149 sbitmap_vector_free (hoist_vbein);
4150 sbitmap_vector_free (hoist_vbeout);
4151 sbitmap_vector_free (hoist_exprs);
4152 sbitmap_vector_free (transpout);
4154 free_dominance_info (CDI_DOMINATORS);
4157 /* Compute the very busy expressions at entry/exit from each block.
4159 An expression is very busy if all paths from a given point
4160 compute the expression. */
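/* The fixed point computed below is the usual backward dataflow
   problem; as a sketch, in terms of the local property arrays:

     VBEOUT(bb) = intersection of VBEIN(s) over all successors s
     VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))

   with VBEOUT empty at the exit.  */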
4162 static void
4163 compute_code_hoist_vbeinout (void)
4165 int changed, passes;
4166 basic_block bb;
4168 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4169 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4171 passes = 0;
4172 changed = 1;
4174 while (changed)
4176 changed = 0;
4178 /* We scan the blocks in reverse order to speed up
4179 convergence. */
4180 FOR_EACH_BB_REVERSE (bb)
4182 if (bb->next_bb != EXIT_BLOCK_PTR)
4183 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
4184 hoist_vbein, bb->index);
4186 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
4187 antloc[bb->index],
4188 hoist_vbeout[bb->index],
4189 transp[bb->index]);
4192 passes++;
4195 if (dump_file)
4196 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4199 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4201 static void
4202 compute_code_hoist_data (void)
4204 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4205 compute_transpout ();
4206 compute_code_hoist_vbeinout ();
4207 calculate_dominance_info (CDI_DOMINATORS);
4208 if (dump_file)
4209 fprintf (dump_file, "\n");
4212 /* Determine if the expression identified by EXPR_INDEX would
4213 reach BB unimpaired if it were placed at the end of EXPR_BB.
4215 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4216 to me that the expression must either be computed or transparent in
4217 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4218 would allow the expression to be hoisted out of loops, even if
4219 the expression wasn't a loop invariant.
4221 Contrast this to reachability for PRE where an expression is
4222 considered reachable if *any* path reaches instead of *all*
4223 paths. */
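/* As a sketch, in the diamond

        EXPR_BB
        /     \
      B1       B2
        \     /
          BB

   the expression must be computed or transparent in both B1 and B2
   for it to reach BB unimpaired; PRE's any-path notion of
   reachability would already be satisfied by one of the two.  */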
4225 static int
4226 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4228 edge pred;
4229 edge_iterator ei;
4230 int visited_allocated_locally = 0;
4233 if (visited == NULL)
4235 visited_allocated_locally = 1;
4236 visited = XCNEWVEC (char, last_basic_block);
4239 FOR_EACH_EDGE (pred, ei, bb->preds)
4241 basic_block pred_bb = pred->src;
4243 if (pred->src == ENTRY_BLOCK_PTR)
4244 break;
4245 else if (pred_bb == expr_bb)
4246 continue;
4247 else if (visited[pred_bb->index])
4248 continue;
4250 /* Does this predecessor generate this expression? */
4251 else if (TEST_BIT (comp[pred_bb->index], expr_index))
4252 break;
4253 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4254 break;
4256 /* Not killed. */
4257 else
4259 visited[pred_bb->index] = 1;
4260 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4261 pred_bb, visited))
4262 break;
4265 if (visited_allocated_locally)
4266 free (visited);
4268 return (pred == NULL);
4271 /* Actually perform code hoisting. */
4273 static int
4274 hoist_code (void)
4276 basic_block bb, dominated;
4277 VEC (basic_block, heap) *domby;
4278 unsigned int i, j;
4279 struct expr **index_map;
4280 struct expr *expr;
4281 int changed = 0;
4283 sbitmap_vector_zero (hoist_exprs, last_basic_block);
4285 /* Compute a mapping from expression number (`bitmap_index') to
4286 hash table entry. */
4288 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4289 for (i = 0; i < expr_hash_table.size; i++)
4290 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4291 index_map[expr->bitmap_index] = expr;
4293 /* Walk over each basic block looking for potentially hoistable
4294 expressions; nothing gets hoisted from the entry block. */
4295 FOR_EACH_BB (bb)
4297 int found = 0;
4298 int insn_inserted_p;
4300 domby = get_dominated_by (CDI_DOMINATORS, bb);
4301 /* Examine each expression that is very busy at the exit of this
4302 block. These are the potentially hoistable expressions. */
4303 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4305 int hoistable = 0;
4307 if (TEST_BIT (hoist_vbeout[bb->index], i)
4308 && TEST_BIT (transpout[bb->index], i))
4310 /* We've found a potentially hoistable expression, now
4311 we look at every block BB dominates to see if it
4312 computes the expression. */
4313 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4315 /* Ignore self dominance. */
4316 if (bb == dominated)
4317 continue;
4318 /* We've found a dominated block; now see if it computes
4319 the busy expression and whether or not moving that
4320 expression to the "beginning" of that block is safe. */
4321 if (!TEST_BIT (antloc[dominated->index], i))
4322 continue;
4324 /* Note if the expression would reach the dominated block
4325 unimpaired if it were placed at the end of BB.
4327 Keep track of how many times this expression is hoistable
4328 from a dominated block into BB. */
4329 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4330 hoistable++;
4333 /* If we found more than one hoistable occurrence of this
4334 expression, then note it in the bitmap of expressions to
4335 hoist. It makes no sense to hoist things which are computed
4336 in only one BB, and doing so tends to pessimize register
4337 allocation. One could increase this value to try harder
4338 to avoid any possible code expansion due to register
4339 allocation issues; however experiments have shown that
4340 the vast majority of hoistable expressions are only movable
4341 from two successors, so raising this threshold is likely
4342 to nullify any benefit we get from code hoisting. */
4343 if (hoistable > 1)
4345 SET_BIT (hoist_exprs[bb->index], i);
4346 found = 1;
4350 /* If we found nothing to hoist, then quit now. */
4351 if (! found)
4353 VEC_free (basic_block, heap, domby);
4354 continue;
4357 /* Loop over all the hoistable expressions. */
4358 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4360 /* We want to insert the expression into BB only once, so
4361 note when we've inserted it. */
4362 insn_inserted_p = 0;
4364 /* These tests should be the same as the tests above. */
4365 if (TEST_BIT (hoist_exprs[bb->index], i))
4367 /* We've found a potentially hoistable expression, now
4368 we look at every block BB dominates to see if it
4369 computes the expression. */
4370 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4372 /* Ignore self dominance. */
4373 if (bb == dominated)
4374 continue;
4376 /* We've found a dominated block; now see if it computes
4377 the busy expression and whether or not moving that
4378 expression to the "beginning" of that block is safe. */
4379 if (!TEST_BIT (antloc[dominated->index], i))
4380 continue;
4382 /* The expression is computed in the dominated block and
4383 it would be safe to compute it at the start of the
4384 dominated block. Now we have to determine if the
4385 expression would reach the dominated block if it was
4386 placed at the end of BB. */
4387 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4389 struct expr *expr = index_map[i];
4390 struct occr *occr = expr->antic_occr;
4391 rtx insn;
4392 rtx set;
4394 /* Find the right occurrence of this expression. */
4395 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4396 occr = occr->next;
4398 gcc_assert (occr);
4399 insn = occr->insn;
4400 set = single_set (insn);
4401 gcc_assert (set);
4403 /* Create a pseudo-reg to store the result of reaching
4404 expressions into. Get the mode for the new pseudo
4405 from the mode of the original destination pseudo. */
4406 if (expr->reaching_reg == NULL)
4407 expr->reaching_reg
4408 = gen_reg_rtx_and_attrs (SET_DEST (set));
4410 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4411 delete_insn (insn);
4412 occr->deleted_p = 1;
4413 changed = 1;
4414 gcse_subst_count++;
4416 if (!insn_inserted_p)
4418 insert_insn_end_basic_block (index_map[i], bb, 0);
4419 insn_inserted_p = 1;
4425 VEC_free (basic_block, heap, domby);
4428 free (index_map);
4430 return changed;
4433 /* Top level routine to perform one code hoisting (aka unification) pass.
4435 Return nonzero if a change was made. */
4437 static int
4438 one_code_hoisting_pass (void)
4440 int changed = 0;
4442 gcse_subst_count = 0;
4443 gcse_create_count = 0;
4445 /* Return if there's nothing to do, or it is too expensive. */
4446 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4447 || is_too_expensive (_("GCSE disabled")))
4448 return 0;
4450 /* We need alias. */
4451 init_alias_analysis ();
4453 bytes_used = 0;
4454 gcc_obstack_init (&gcse_obstack);
4455 alloc_gcse_mem ();
4457 alloc_hash_table (&expr_hash_table, 0);
4458 compute_hash_table (&expr_hash_table);
4459 if (dump_file)
4460 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
4462 if (expr_hash_table.n_elems > 0)
4464 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4465 compute_code_hoist_data ();
4466 changed = hoist_code ();
4467 free_code_hoist_mem ();
4470 free_hash_table (&expr_hash_table);
4471 free_gcse_mem ();
4472 obstack_free (&gcse_obstack, NULL);
4474 /* We are finished with alias. */
4475 end_alias_analysis ();
4477 if (dump_file)
4479 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
4480 current_function_name (), n_basic_blocks, bytes_used);
4481 fprintf (dump_file, "%d substs, %d insns created\n",
4482 gcse_subst_count, gcse_create_count);
4485 return changed;
4488 /* Here we provide the things required to do store motion towards
4489 the exit. In order for this to be effective, gcse also needed to
4490 be taught how to move a load when it is killed only by a store to itself.
4492 int i;
4493 float a[10];
4495 void foo(float scale)
4497 for (i=0; i<10; i++)
4498 a[i] *= scale;
4501 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
4502 the load out since it's live around the loop, and stored at the bottom
4503 of the loop.
4505 The 'Load Motion' referred to and implemented in this file is
4506 an enhancement to gcse which, when using edge-based lcm, recognizes
4507 this situation and allows gcse to move the load out of the loop.
4509 Once gcse has hoisted the load, store motion can then push this
4510 load towards the exit, and we end up with no loads or stores of 'i'
4511 in the loop. */
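/* For the example above, the intended end result is roughly (a
   source-level sketch, with 'reg' standing for the reaching
   register):

     for (reg = 0; reg < 10; reg++)
       a[reg] *= scale;
     i = reg;

   i.e. all the loads and stores of 'i' inside the loop are gone.  */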
4513 static hashval_t
4514 pre_ldst_expr_hash (const void *p)
4516 int do_not_record_p = 0;
4517 const struct ls_expr *const x = (const struct ls_expr *) p;
4518 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
4521 static int
4522 pre_ldst_expr_eq (const void *p1, const void *p2)
4524 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
4525 *const ptr2 = (const struct ls_expr *) p2;
4526 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
4529 /* This will search the ldst list for a matching expression. If it
4530 doesn't find one, we create one and initialize it. */
4532 static struct ls_expr *
4533 ldst_entry (rtx x)
4535 int do_not_record_p = 0;
4536 struct ls_expr * ptr;
4537 unsigned int hash;
4538 void **slot;
4539 struct ls_expr e;
4541 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
4542 NULL, /*have_reg_qty=*/false);
4544 e.pattern = x;
4545 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
4546 if (*slot)
4547 return (struct ls_expr *)*slot;
4549 ptr = XNEW (struct ls_expr);
4551 ptr->next = pre_ldst_mems;
4552 ptr->expr = NULL;
4553 ptr->pattern = x;
4554 ptr->pattern_regs = NULL_RTX;
4555 ptr->loads = NULL_RTX;
4556 ptr->stores = NULL_RTX;
4557 ptr->reaching_reg = NULL_RTX;
4558 ptr->invalid = 0;
4559 ptr->index = 0;
4560 ptr->hash_index = hash;
4561 pre_ldst_mems = ptr;
4562 *slot = ptr;
4564 return ptr;
4567 /* Free up an individual ldst entry. */
4569 static void
4570 free_ldst_entry (struct ls_expr * ptr)
4572 free_INSN_LIST_list (& ptr->loads);
4573 free_INSN_LIST_list (& ptr->stores);
4575 free (ptr);
4578 /* Free up all memory associated with the ldst list. */
4580 static void
4581 free_ldst_mems (void)
4583 if (pre_ldst_table)
4584 htab_delete (pre_ldst_table);
4585 pre_ldst_table = NULL;
4587 while (pre_ldst_mems)
4589 struct ls_expr * tmp = pre_ldst_mems;
4591 pre_ldst_mems = pre_ldst_mems->next;
4593 free_ldst_entry (tmp);
4596 pre_ldst_mems = NULL;
4599 /* Dump debugging info about the ldst list. */
4601 static void
4602 print_ldst_list (FILE * file)
4604 struct ls_expr * ptr;
4606 fprintf (file, "LDST list: \n");
4608 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
4610 fprintf (file, " Pattern (%3d): ", ptr->index);
4612 print_rtl (file, ptr->pattern);
4614 fprintf (file, "\n Loads : ");
4616 if (ptr->loads)
4617 print_rtl (file, ptr->loads);
4618 else
4619 fprintf (file, "(nil)");
4621 fprintf (file, "\n Stores : ");
4623 if (ptr->stores)
4624 print_rtl (file, ptr->stores);
4625 else
4626 fprintf (file, "(nil)");
4628 fprintf (file, "\n\n");
4631 fprintf (file, "\n");
4634 /* Return X's entry in the ldst list, or NULL if it is absent or invalid. */
4636 static struct ls_expr *
4637 find_rtx_in_ldst (rtx x)
4639 struct ls_expr e;
4640 void **slot;
4641 if (!pre_ldst_table)
4642 return NULL;
4643 e.pattern = x;
4644 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
4645 if (!slot || ((struct ls_expr *)*slot)->invalid)
4646 return NULL;
4647 return (struct ls_expr *) *slot;
4650 /* Return first item in the list. */
4652 static inline struct ls_expr *
4653 first_ls_expr (void)
4655 return pre_ldst_mems;
4658 /* Return the next item in the list after the specified one. */
4660 static inline struct ls_expr *
4661 next_ls_expr (struct ls_expr * ptr)
4663 return ptr->next;
4666 /* Load Motion for loads which only kill themselves. */
4668 /* Return true if x is a simple MEM operation, with no registers or
4669 side effects. These are the types of loads we consider for the
4670 ld_motion list; otherwise we let the usual aliasing take care of it. */
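/* For example, a load such as (mem:SI (symbol_ref "i")) qualifies,
   while a volatile or BLKmode reference, a possibly trapping
   reference (when -fnon-call-exceptions is in effect), or anything
   mentioning the stack pointer is rejected below.  */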
4672 static int
4673 simple_mem (const_rtx x)
4675 if (! MEM_P (x))
4676 return 0;
4678 if (MEM_VOLATILE_P (x))
4679 return 0;
4681 if (GET_MODE (x) == BLKmode)
4682 return 0;
4684 /* If we are handling exceptions, we must be careful with memory references
4685 that may trap. If we are not, the behavior is undefined, so we may just
4686 continue. */
4687 if (flag_non_call_exceptions && may_trap_p (x))
4688 return 0;
4690 if (side_effects_p (x))
4691 return 0;
4693 /* Do not consider function arguments passed on the stack. */
4694 if (reg_mentioned_p (stack_pointer_rtx, x))
4695 return 0;
4697 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
4698 return 0;
4700 return 1;
4703 /* Make sure there isn't a buried reference in this pattern anywhere.
4704 If there is, invalidate the entry for it since we're not capable
4705 of fixing it up just yet. We have to be sure we know about ALL
4706 loads since the aliasing code will allow all entries in the
4707 ld_motion list to not-alias themselves. If we miss a load, we will get
4708 the wrong value since gcse might common it and we won't know to
4709 fix it up. */
4711 static void
4712 invalidate_any_buried_refs (rtx x)
4714 const char * fmt;
4715 int i, j;
4716 struct ls_expr * ptr;
4718 /* Invalidate it in the list. */
4719 if (MEM_P (x) && simple_mem (x))
4721 ptr = ldst_entry (x);
4722 ptr->invalid = 1;
4725 /* Recursively process the insn. */
4726 fmt = GET_RTX_FORMAT (GET_CODE (x));
4728 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4730 if (fmt[i] == 'e')
4731 invalidate_any_buried_refs (XEXP (x, i));
4732 else if (fmt[i] == 'E')
4733 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4734 invalidate_any_buried_refs (XVECEXP (x, i, j));
4738 /* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
4739 here means MEM loads and stores to symbols, with no side effects
4740 and no registers in the expression. For a MEM destination, we also
4741 check that the insn is still valid if we replace the destination with a
4742 REG, as is done in update_ld_motion_stores. If there are any uses/defs
4743 which don't match these criteria, they are invalidated and trimmed out
4744 later. */
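/* Schematically, the entries recorded below are (set (reg) (mem)) on
   an entry's loads list and (set (mem) (src)) on its stores list,
   where src is neither a MEM nor an ASM_OPERANDS and can be assigned
   to a register without clobbers; anything else marks the entry
   invalid.  */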
4746 static void
4747 compute_ld_motion_mems (void)
4749 struct ls_expr * ptr;
4750 basic_block bb;
4751 rtx insn;
4753 pre_ldst_mems = NULL;
4754 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
4755 pre_ldst_expr_eq, NULL);
4757 FOR_EACH_BB (bb)
4759 FOR_BB_INSNS (bb, insn)
4761 if (NONDEBUG_INSN_P (insn))
4763 if (GET_CODE (PATTERN (insn)) == SET)
4765 rtx src = SET_SRC (PATTERN (insn));
4766 rtx dest = SET_DEST (PATTERN (insn));
4768 /* Check for a simple LOAD... */
4769 if (MEM_P (src) && simple_mem (src))
4771 ptr = ldst_entry (src);
4772 if (REG_P (dest))
4773 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
4774 else
4775 ptr->invalid = 1;
4777 else
4779 /* Make sure there isn't a buried load somewhere. */
4780 invalidate_any_buried_refs (src);
4783 /* Check for stores. Don't worry about aliased ones, they
4784 will block any movement we might do later. We only care
4785 about this exact pattern since this is the only
4786 circumstance in which we will ignore the aliasing info.
4787 if (MEM_P (dest) && simple_mem (dest))
4789 ptr = ldst_entry (dest);
4791 if (! MEM_P (src)
4792 && GET_CODE (src) != ASM_OPERANDS
4793 /* Check for REG manually since want_to_gcse_p
4794 returns 0 for all REGs. */
4795 && can_assign_to_reg_without_clobbers_p (src))
4796 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
4797 else
4798 ptr->invalid = 1;
4801 else
4802 invalidate_any_buried_refs (PATTERN (insn));
4808 /* Remove any references that have been invalidated or are not in the
4809 expression list for pre gcse. */
4811 static void
4812 trim_ld_motion_mems (void)
4814 struct ls_expr * * last = & pre_ldst_mems;
4815 struct ls_expr * ptr = pre_ldst_mems;
4817 while (ptr != NULL)
4819 struct expr * expr;
4821 /* Delete if entry has been made invalid. */
4822 if (! ptr->invalid)
4824 /* Delete if we cannot find this mem in the expression list. */
4825 unsigned int hash = ptr->hash_index % expr_hash_table.size;
4827 for (expr = expr_hash_table.table[hash];
4828 expr != NULL;
4829 expr = expr->next_same_hash)
4830 if (expr_equiv_p (expr->expr, ptr->pattern))
4831 break;
4833 else
4834 expr = (struct expr *) 0;
4836 if (expr)
4838 /* Set the expression field if we are keeping it. */
4839 ptr->expr = expr;
4840 last = & ptr->next;
4841 ptr = ptr->next;
4843 else
4845 *last = ptr->next;
4846 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
4847 free_ldst_entry (ptr);
4848 ptr = * last;
4852 /* Show the world what we've found. */
4853 if (dump_file && pre_ldst_mems != NULL)
4854 print_ldst_list (dump_file);
4857 /* This routine will take an expression which we are replacing with
4858 a reaching register, and update any stores that are needed if
4859 that expression is in the ld_motion list. Stores are updated by
4860 copying their SRC to the reaching register, and then storing
4861 the reaching register into the store location. This keeps the
4862 correct value in the reaching register for the loads. */
4864 static void
4865 update_ld_motion_stores (struct expr * expr)
4867 struct ls_expr * mem_ptr;
4869 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
4871 /* We can try to find just the REACHED stores, but it shouldn't
4872 matter to set the reaching reg everywhere... some might be
4873 dead and should be eliminated later. */
4875 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4876 where reg is the reaching reg used in the load. We checked in
4877 compute_ld_motion_mems that we can replace (set mem expr) with
4878 (set reg expr) in that insn. */
4879 rtx list = mem_ptr->stores;
4881 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4883 rtx insn = XEXP (list, 0);
4884 rtx pat = PATTERN (insn);
4885 rtx src = SET_SRC (pat);
4886 rtx reg = expr->reaching_reg;
4887 rtx copy, new_rtx;
4889 /* If we've already copied it, continue. */
4890 if (expr->reaching_reg == src)
4891 continue;
4893 if (dump_file)
4895 fprintf (dump_file, "PRE: store updated with reaching reg ");
4896 print_rtl (dump_file, expr->reaching_reg);
4897 fprintf (dump_file, ":\n ");
4898 print_inline_rtx (dump_file, insn, 8);
4899 fprintf (dump_file, "\n");
4902 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4903 new_rtx = emit_insn_before (copy, insn);
4904 SET_SRC (pat) = reg;
4905 df_insn_rescan (insn);
4907 /* Un-recognize this pattern since it's probably different now. */
4908 INSN_CODE (insn) = -1;
4909 gcse_create_count++;
4914 /* Return true if the graph is too expensive to optimize. PASS is the
4915 optimization about to be performed. */
4917 static bool
4918 is_too_expensive (const char *pass)
4920 /* Trying to perform global optimizations on flow graphs which have
4921 a high connectivity will take a long time and is unlikely to be
4922 particularly useful.
4924 In normal circumstances a cfg should have about twice as many
4925 edges as blocks. But we do not want to punish small functions
4926 which have a couple of switch statements. Rather than simply
4927 thresholding the number of blocks, we use something with a more
4928 graceful degradation. */
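/* A worked example: with 1000 basic blocks the test below allows up
   to 20000 + 4 * 1000 = 24000 edges, i.e. an average of 24 edges per
   block, before the pass is declared too expensive.  */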
4929 if (n_edges > 20000 + n_basic_blocks * 4)
4931 warning (OPT_Wdisabled_optimization,
4932 "%s: %d basic blocks and %d edges/basic block",
4933 pass, n_basic_blocks, n_edges / n_basic_blocks);
4935 return true;
4938 /* If allocating memory for the cprop bitmap would take up too much
4939 storage it's better just to disable the optimization. */
4940 if ((n_basic_blocks
4941 * SBITMAP_SET_SIZE (max_reg_num ())
4942 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
4944 warning (OPT_Wdisabled_optimization,
4945 "%s: %d basic blocks and %d registers",
4946 pass, n_basic_blocks, max_reg_num ());
4948 return true;
4951 return false;
4955 /* Main function for the CPROP pass. */
4957 static int
4958 one_cprop_pass (void)
4960 int changed = 0;
4962 /* Return if there's nothing to do, or it is too expensive. */
4963 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4964 || is_too_expensive (_ ("const/copy propagation disabled")))
4965 return 0;
4967 global_const_prop_count = local_const_prop_count = 0;
4968 global_copy_prop_count = local_copy_prop_count = 0;
4970 bytes_used = 0;
4971 gcc_obstack_init (&gcse_obstack);
4972 alloc_gcse_mem ();
4974 /* Do a local const/copy propagation pass first. The global pass
4975 only handles global opportunities.
4976 If the local pass changes something, remove any unreachable blocks
4977 because the CPROP global dataflow analysis may get into infinite
4978 loops for CFGs with unreachable blocks.
4980 FIXME: This local pass should not be necessary after CSE (but for
4981 some reason it still is). It is also (proven) not necessary
4982 to run the local pass right after FWPROP.
4984 FIXME: The global analysis would not get into infinite loops if it
4985 would use the DF solver (via df_simple_dataflow) instead of
4986 the solver implemented in this file. */
4987 if (local_cprop_pass ())
4989 delete_unreachable_blocks ();
4990 df_analyze ();
4993 /* Determine implicit sets. */
4994 implicit_sets = XCNEWVEC (rtx, last_basic_block);
4995 find_implicit_sets ();
4997 alloc_hash_table (&set_hash_table, 1);
4998 compute_hash_table (&set_hash_table);
5000 /* Free implicit_sets before peak usage. */
5001 free (implicit_sets);
5002 implicit_sets = NULL;
5004 if (dump_file)
5005 dump_hash_table (dump_file, "SET", &set_hash_table);
5006 if (set_hash_table.n_elems > 0)
5008 basic_block bb;
5009 rtx insn;
5011 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
5012 compute_cprop_data ();
5014 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
5016 /* Reset tables used to keep track of what's still valid [since
5017 the start of the block]. */
5018 reset_opr_set_tables ();
5020 FOR_BB_INSNS (bb, insn)
5021 if (INSN_P (insn))
5023 changed |= cprop_insn (insn);
5025 /* Keep track of everything modified by this insn. */
5026 /* ??? Need to be careful w.r.t. mods done to INSN.
5027 Don't call mark_oprs_set if we turned the
5028 insn into a NOTE. */
5029 if (! NOTE_P (insn))
5030 mark_oprs_set (insn);
5034 changed |= bypass_conditional_jumps ();
5035 free_cprop_mem ();
5038 free_hash_table (&set_hash_table);
5039 free_gcse_mem ();
5040 obstack_free (&gcse_obstack, NULL);
5042 if (dump_file)
5044 fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
5045 current_function_name (), n_basic_blocks, bytes_used);
5046 fprintf (dump_file, "%d local const props, %d local copy props, ",
5047 local_const_prop_count, local_copy_prop_count);
5048 fprintf (dump_file, "%d global const props, %d global copy props\n\n",
5049 global_const_prop_count, global_copy_prop_count);
5052 return changed;
5056 /* All the passes implemented in this file. Each pass has its
5057 own gate and execute function, and at the end of the file a
5058 pass definition for passes.c.
5060 We do not construct an accurate cfg in functions which call
5061 setjmp, so none of these passes runs if the function calls
5062 setjmp.
5063 FIXME: Should just handle setjmp via REG_SETJMP notes. */
5065 static bool
5066 gate_rtl_cprop (void)
5068 return optimize > 0 && flag_gcse
5069 && !cfun->calls_setjmp
5070 && dbg_cnt (cprop);
5073 static unsigned int
5074 execute_rtl_cprop (void)
5076 delete_unreachable_blocks ();
5077 df_note_add_problem ();
5078 df_set_flags (DF_LR_RUN_DCE);
5079 df_analyze ();
5080 flag_rerun_cse_after_global_opts |= one_cprop_pass ();
5081 return 0;
5084 static bool
5085 gate_rtl_pre (void)
5087 return optimize > 0 && flag_gcse
5088 && !cfun->calls_setjmp
5089 && optimize_function_for_speed_p (cfun)
5090 && dbg_cnt (pre);
5093 static unsigned int
5094 execute_rtl_pre (void)
5096 delete_unreachable_blocks ();
5097 df_note_add_problem ();
5098 df_analyze ();
5099 flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
5100 return 0;
5103 static bool
5104 gate_rtl_hoist (void)
5106 return optimize > 0 && flag_gcse
5107 && !cfun->calls_setjmp
5108 /* It does not make sense to run code hoisting unless we are optimizing
5109 for code size -- it rarely makes programs faster, and can make them
5110 bigger if we did PRE (when optimizing for space, we don't run PRE). */
5111 && optimize_function_for_size_p (cfun)
5112 && dbg_cnt (hoist);
5115 static unsigned int
5116 execute_rtl_hoist (void)
5118 delete_unreachable_blocks ();
5119 df_note_add_problem ();
5120 df_analyze ();
5121 flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
5122 return 0;
5125 struct rtl_opt_pass pass_rtl_cprop =
5128 RTL_PASS,
5129 "cprop", /* name */
5130 gate_rtl_cprop, /* gate */
5131 execute_rtl_cprop, /* execute */
5132 NULL, /* sub */
5133 NULL, /* next */
5134 0, /* static_pass_number */
5135 TV_CPROP, /* tv_id */
5136 PROP_cfglayout, /* properties_required */
5137 0, /* properties_provided */
5138 0, /* properties_destroyed */
5139 0, /* todo_flags_start */
5140 TODO_df_finish | TODO_verify_rtl_sharing |
5141 TODO_dump_func |
5142 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5146 struct rtl_opt_pass pass_rtl_pre =
5149 RTL_PASS,
5150 "pre", /* name */
5151 gate_rtl_pre, /* gate */
5152 execute_rtl_pre, /* execute */
5153 NULL, /* sub */
5154 NULL, /* next */
5155 0, /* static_pass_number */
5156 TV_PRE, /* tv_id */
5157 PROP_cfglayout, /* properties_required */
5158 0, /* properties_provided */
5159 0, /* properties_destroyed */
5160 0, /* todo_flags_start */
5161 TODO_df_finish | TODO_verify_rtl_sharing |
5162 TODO_dump_func |
5163 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5167 struct rtl_opt_pass pass_rtl_hoist =
5170 RTL_PASS,
5171 "hoist", /* name */
5172 gate_rtl_hoist, /* gate */
5173 execute_rtl_hoist, /* execute */
5174 NULL, /* sub */
5175 NULL, /* next */
5176 0, /* static_pass_number */
5177 TV_HOIST, /* tv_id */
5178 PROP_cfglayout, /* properties_required */
5179 0, /* properties_provided */
5180 0, /* properties_destroyed */
5181 0, /* todo_flags_start */
5182 TODO_df_finish | TODO_verify_rtl_sharing |
5183 TODO_dump_func |
5184 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5188 #include "gt-gcse.h"