/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
   - ability to realloc sbitmap vectors would allow one initial computation
     of reg_set_in_block with only subsequent additions, rather than
     recomputing it for each pass.  */
/* References searched while implementing this.

   Compilers: Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region? Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Control Flow
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"

#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "timevar.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "df.h"
#include "dbgcnt.h"
/* Propagate flow information through back edges and thus enable PRE's
   moving loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute basic block information.

   2) Compute table of places where registers are set.

   3) Perform copy/constant propagation.

   4) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   5) Perform another pass of copy/constant propagation.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
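
/* A hedged, source-level sketch of the PRE transformation described above.
   These two functions are illustrative only -- the identifiers are made up
   for exposition and nothing here is part of the pass itself.  */

static int
pre_example_before (int a, int b, int p)
{
  int x = p ? a + b : 0;
  return x + (a + b);	/* a + b is partially redundant here: it has
			   already been computed on the path where P
			   is true, but not on the other path.  */
}

static int
pre_example_after (int a, int b, int p)
{
  int reaching_reg, x;
  if (p)
    {
      reaching_reg = a + b;	/* Existing occurrence feeds the copy
				   (step 5 above).  */
      x = reaching_reg;
    }
  else
    {
      reaching_reg = a + b;	/* Copy inserted on the other path
				   (step 4), making the expression
				   fully redundant.  */
      x = 0;
    }
  return x + reaching_reg;	/* Redundant computation deleted (step 3);
				   the new pseudo is used instead.  */
}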
/* GCSE global vars.  */

/* Note whether or not we should run jump optimization after gcse.  We
   want to do this for two cases.

    * If we changed any jumps via cprop.

    * If we added any labels via edge splitting.  */
static int run_jump_opt_after_gcse;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};
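
/* A hedged illustration of the anticipatable/available distinction the
   fields above record; the function is hypothetical and exists only for
   exposition.  */

static void
occr_example (void)
{
  int a = 1, b = 2, t, u;

  t = a + b;	/* Anticipatable occurrence: first computation of a + b
		   in the block, operands unmodified before it.  */
  a = 3;	/* Modifies an operand, so the first occurrence is not
		   the available one.  */
  u = a + b;	/* Available occurrence: last computation in the block,
		   operands unmodified after it.  */
  (void) t;
  (void) u;
}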
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether the table is the expression one or the copy propagation one.  */
  int set_p;
};
/* Expression hash table.  */
static struct hash_table expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table set_hash_table;

/* Mapping of uids to cuids.
   Only real insns get cuids.  */
static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */
#ifdef ENABLE_CHECKING
#define INSN_CUID(INSN) \
  (gcc_assert (INSN_UID (INSN) <= max_uid), uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif
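
/* Hedged usage sketch: because cuids increase monotonically over the real
   insns, intra-block ordering tests reduce to a single comparison.  The
   helper below is hypothetical and not part of this file.  */

static inline int
insn_precedes_p (rtx a, rtx b)
{
  /* Both insns must lie in the same basic block for this to be a
     meaningful "comes before" test.  */
  return INSN_CUID (a) < INSN_CUID (b);
}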
/* Number of cuids.  */
static int max_cuid;

/* Mapping of cuids to insns.  */
static rtx *cuid_insn;

/* Get insn from cuid.  */
#define CUID_INSN(CUID) (cuid_insn[CUID])

/* Maximum register number in function prior to doing gcse + 1.
   Registers created during this pass have regno >= max_gcse_regno.
   This is named with "gcse" to not collide with global of same name.  */
static unsigned int max_gcse_regno;
/* Table of registers that are modified.

   For each register, each element is a list of places where the pseudo-reg
   is set.

   For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
   requires knowledge of which blocks kill which regs [and thus could use
   a bitmap instead of the lists `reg_set_table' uses].

   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
   advantage of recording things this way is that `reg_set_table' is fairly
   sparse with respect to pseudo regs but for hard regs could be fairly dense
   [relatively speaking].  And recording sets of pseudo-regs in lists speeds
   up functions like compute_transp since in the case of pseudo-regs we only
   need to iterate over the number of times a pseudo-reg is set, not over the
   number of basic blocks [clearly there is a bit of a slow down in the cases
   where a pseudo is set more than once in a block, however it is believed
   that the net effect is to speed things up].  This isn't done for hard-regs
   because recording call-clobbered hard-regs in `reg_set_table' at each
   function call can consume a fair bit of memory, and iterating over
   hard-regs stored this way in compute_transp will be more expensive.  */

typedef struct reg_set
{
  /* The next setting of this register.  */
  struct reg_set *next;
  /* The index of the block where it was set.  */
  int bb_index;
} reg_set;
static reg_set **reg_set_table;

/* Size of `reg_set_table'.
   The table starts out at max_gcse_regno + slop, and is enlarged as
   necessary.  */
static int reg_set_table_size;

/* Amount to grow `reg_set_table' by when it's full.  */
#define REG_SET_TABLE_SLOP 100
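
/* Hedged sketch of how the list representation above is consumed; this
   query is hypothetical and only illustrates the per-register iteration
   pattern that the comment above says compute_transp relies on.  */

static int
reg_set_in_bb_p (int regno, int bb_index)
{
  reg_set *r;

  /* Walk only the places where REGNO is set, rather than all blocks.  */
  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    if (r->bb_index == bb_index)
      return 1;
  return 0;
}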
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e., loads and stores to a single location.  We can then allow movement
   of these MEM refs with a little special allowance (all stores copy the
   same value to the reaching reg used for the loads).  This means all
   values used to store into memory must have no side effects so we can
   re-issue the setter value.
   Store motion uses this structure as an expression table to track stores
   which look interesting, and might be movable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};
/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;
/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* For each block, a bitmap of registers set in the block.
   This is used by compute_transp.
   It is computed during hash table computation and not by compute_sets
   as it includes registers added since the last pass (or between cprop and
   gcse) and it's currently not easy to realloc sbitmap vectors.  */
static sbitmap *reg_set_in_block;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* For available exprs.  */
static sbitmap *ae_kill, *ae_gen;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *grealloc (void *, size_t);
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void alloc_reg_set_mem (int);
static void free_reg_set_mem (void);
static void record_one_set (int, rtx);
static void record_set_info (rtx, rtx, void *);
static void compute_sets (void);
static void hash_scan_insn (rtx, struct hash_table *, int);
static void hash_scan_set (rtx, rtx, struct hash_table *);
static void hash_scan_clobber (rtx, rtx, struct hash_table *);
static void hash_scan_call (rtx, rtx, struct hash_table *);
static int want_to_gcse_p (rtx);
static bool can_assign_to_reg_p (rtx);
static bool gcse_constant_p (rtx);
static int oprs_unchanged_p (rtx, rtx, int);
static int oprs_anticipatable_p (rtx, rtx);
static int oprs_available_p (rtx, rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
				  struct hash_table *);
static void insert_set_in_table (rtx, rtx, struct hash_table *);
static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (rtx, rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, rtx, void *);
static void compute_hash_table (struct hash_table *);
static void alloc_hash_table (int, struct hash_table *, int);
static void free_hash_table (struct hash_table *);
static void compute_hash_table_work (struct hash_table *);
static void dump_hash_table (FILE *, const char *, struct hash_table *);
static struct expr *lookup_set (unsigned int, struct hash_table *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (rtx, rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (rtx, int, sbitmap *, int);
static void compute_transpout (void);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, rtx, void *);
static int load_killed_in_block_p (basic_block, int, rtx, int);
static void canon_list_insert (rtx, rtx, void *);
static int cprop_insn (rtx, int);
static int cprop (int);
static void find_implicit_sets (void);
static int one_cprop_pass (int, bool, bool);
static bool constprop_register (rtx, rtx, rtx, bool);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (rtx, edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block, int);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (int);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
static void hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int enumerate_ldsts (void);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void reg_set_info (rtx, rtx, void *);
static void reg_clear_last_set (rtx, rtx, void *);
static bool store_ops_ok (rtx, int *);
static rtx extract_mentioned_regs (rtx);
static rtx extract_mentioned_regs_helper (rtx, rtx);
static void find_moveable_store (rtx, int *, int *);
static int compute_store_table (void);
static bool load_kills_store (rtx, rtx, int);
static bool find_loads (rtx, rtx, int);
static bool store_killed_in_insn (rtx, rtx, rtx, int);
static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *);
static bool store_killed_before (rtx, rtx, rtx, basic_block, int *);
static void build_store_vectors (void);
static void insert_insn_start_basic_block (rtx, basic_block);
static int insert_store (struct ls_expr *, edge);
static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
static void delete_store (struct ls_expr *, basic_block);
static void free_store_memory (void);
static void store_motion (void);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx, bool, rtx*);
static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
static void local_cprop_pass (bool);
static bool is_too_expensive (const char *);
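
/* Hedged sketch: the load/store expression list rooted at pre_ldst_mems
   is walked with the first_ls_expr/next_ls_expr accessors declared above.
   This counting helper is hypothetical and merely mirrors the iteration
   that enumerate_ldsts performs.  */

static int
count_ldst_mems (void)
{
  struct ls_expr *ptr;
  int n = 0;

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    n++;
  return n;
}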
/* Entry point for global common subexpression elimination.
   F is the first instruction in the function.  Return nonzero if a
   change is made.  */
static int
gcse_main (rtx f ATTRIBUTE_UNUSED)
{
  int changed, pass;
  /* Bytes used at start of pass.  */
  int initial_bytes_used;
  /* Maximum number of bytes used by a pass.  */
  int max_pass_bytes;
  /* Point to release obstack data from for each pass.  */
  char *gcse_obstack_bottom;

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* Assume that we do not need to run jump optimizations after gcse.  */
  run_jump_opt_after_gcse = 0;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  df_note_add_problem ();
  df_analyze ();

  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  gcc_obstack_init (&gcse_obstack);
  bytes_used = 0;

  /* We need alias.  */
  init_alias_analysis ();
  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);
  compute_sets ();

  pass = 0;
  initial_bytes_used = bytes_used;
  max_pass_bytes = 0;
  gcse_obstack_bottom = gcse_alloc (1);
  changed = 1;
  while (changed && pass < MAX_GCSE_PASSES)
    {
      changed = 0;
      if (dump_file)
        fprintf (dump_file, "GCSE pass %d\n\n", pass + 1);

      /* Initialize bytes_used to the space for the pred/succ lists,
         and the reg_set_table data.  */
      bytes_used = initial_bytes_used;

      /* Each pass may create new registers, so recalculate each time.  */
      max_gcse_regno = max_reg_num ();

      alloc_gcse_mem ();

      /* Don't allow constant propagation to modify jumps
         during this pass.  */
      timevar_push (TV_CPROP1);
      changed = one_cprop_pass (pass + 1, false, false);
      timevar_pop (TV_CPROP1);

      if (optimize_size)
        /* Do nothing.  */ ;
      else
        {
          timevar_push (TV_PRE);
          changed |= one_pre_gcse_pass (pass + 1);
          /* We may have just created new basic blocks.  Release and
             recompute various things which are sized on the number of
             basic blocks.  */
          if (changed)
            {
              free_modify_mem_tables ();
              modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
              canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
            }
          free_reg_set_mem ();
          alloc_reg_set_mem (max_reg_num ());
          compute_sets ();
          run_jump_opt_after_gcse = 1;
          timevar_pop (TV_PRE);
        }

      if (max_pass_bytes < bytes_used)
        max_pass_bytes = bytes_used;

      /* Free up memory, then reallocate for code hoisting.  We can
         not re-use the existing allocated memory because the tables
         will not have info for the insns or registers created by
         partial redundancy elimination.  */
      free_gcse_mem ();

      /* It does not make sense to run code hoisting unless we are optimizing
         for code size -- it rarely makes programs faster, and can make
         them bigger if we did partial redundancy elimination (when optimizing
         for space, we don't run the partial redundancy algorithms).  */
      if (optimize_size)
        {
          timevar_push (TV_HOIST);
          max_gcse_regno = max_reg_num ();
          alloc_gcse_mem ();
          changed |= one_code_hoisting_pass ();
          free_gcse_mem ();

          if (max_pass_bytes < bytes_used)
            max_pass_bytes = bytes_used;
          timevar_pop (TV_HOIST);
        }

      if (dump_file)
        {
          fprintf (dump_file, "\n");
          fflush (dump_file);
        }

      obstack_free (&gcse_obstack, gcse_obstack_bottom);
      pass++;
    }

  /* Do one last pass of copy propagation, including cprop into
     conditional jumps.  */

  max_gcse_regno = max_reg_num ();
  alloc_gcse_mem ();
  /* This time, go ahead and allow cprop to alter jumps.  */
  timevar_push (TV_CPROP2);
  one_cprop_pass (pass + 1, true, true);
  timevar_pop (TV_CPROP2);
  free_gcse_mem ();

  if (dump_file)
    {
      fprintf (dump_file, "GCSE of %s: %d basic blocks, ",
               current_function_name (), n_basic_blocks);
      fprintf (dump_file, "%d pass%s, %d bytes\n\n",
               pass, pass > 1 ? "es" : "", max_pass_bytes);
    }

  obstack_free (&gcse_obstack, NULL);
  free_reg_set_mem ();

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (!optimize_size && flag_gcse_sm)
    {
      timevar_push (TV_LSM);
      store_motion ();
      timevar_pop (TV_LSM);
    }

  /* Record where pseudo-registers are set.  */
  return run_jump_opt_after_gcse;
}
/* Misc. utilities.  */

/* Nonzero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy[(int) NUM_MACHINE_MODES];

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy[i] = 0;
#else
        reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  static bool can_copy_init_p = false;

  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
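
/* Hedged usage sketch: a caller would typically guard creation of a
   reg-reg copy on can_copy_p, along these lines (hypothetical helper,
   not part of this file).  */

static rtx
maybe_copy_reg (rtx src)
{
  if (! can_copy_p (GET_MODE (src)))
    return NULL_RTX;	/* This mode cannot be copied reg-to-reg.  */
  return gen_reg_rtx (GET_MODE (src));
}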
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to xrealloc.
   We don't record the additional size since we don't know it.
   It won't affect memory usage stats much anyway.  */

static void *
grealloc (void *ptr, size_t size)
{
  return xrealloc (ptr, size);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the cuid mapping array,
   and reg/memory set tracking tables.

   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  int i;
  basic_block bb;
  rtx insn;

  /* Find the largest UID and create a mapping from UIDs to CUIDs.
     CUIDs are like UIDs except they increase monotonically, have no gaps,
     and only apply to real insns.
     (Actually, there are gaps, for insns that are not inside a basic block,
     but we should never see those anyway, so this is OK.)  */

  max_uid = get_max_uid ();
  uid_cuid = gcalloc (max_uid + 1, sizeof (int));
  i = 0;
  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      {
        if (INSN_P (insn))
          uid_cuid[INSN_UID (insn)] = i++;
        else
          uid_cuid[INSN_UID (insn)] = i;
      }

  /* Create a table mapping cuids to insns.  */

  max_cuid = i;
  cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
  i = 0;
  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        CUID_INSN (i++) = insn;

  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_ALLOC (NULL);

  /* Allocate vars to track sets of regs, memory per block.  */
  reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
  canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}
/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free (uid_cuid);
  free (cuid_insn);

  BITMAP_FREE (reg_set_bitmap);

  sbitmap_vector_free (reg_set_in_block);
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
                          struct hash_table *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
        sbitmap_vector_zero (transp, last_basic_block);
      else
        sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp, table->set_p);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to nonzero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}
/* Register set information.

   `reg_set_table' records where each register is set or otherwise
   modified.  */

static struct obstack reg_set_obstack;

static void
alloc_reg_set_mem (int n_regs)
{
  reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
  reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));

  gcc_obstack_init (&reg_set_obstack);
}

static void
free_reg_set_mem (void)
{
  free (reg_set_table);
  obstack_free (&reg_set_obstack, NULL);
}

/* Record REGNO in the reg_set table.  */

static void
record_one_set (int regno, rtx insn)
{
  /* Allocate a new reg_set element and link it onto the list.  */
  struct reg_set *new_reg_info;

  /* If the table isn't big enough, enlarge it.  */
  if (regno >= reg_set_table_size)
    {
      int new_size = regno + REG_SET_TABLE_SLOP;

      reg_set_table = grealloc (reg_set_table,
                                new_size * sizeof (struct reg_set *));
      memset (reg_set_table + reg_set_table_size, 0,
              (new_size - reg_set_table_size) * sizeof (struct reg_set *));
      reg_set_table_size = new_size;
    }

  new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
  bytes_used += sizeof (struct reg_set);
  new_reg_info->bb_index = BLOCK_NUM (insn);
  new_reg_info->next = reg_set_table[regno];
  reg_set_table[regno] = new_reg_info;
}
/* Called from compute_sets via note_stores to handle one SET or CLOBBER in
   an insn.  The DATA is really the instruction in which the SET is
   occurring.  */

static void
record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx record_set_insn = (rtx) data;

  if (REG_P (dest) && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
    record_one_set (REGNO (dest), record_set_insn);
}

/* Scan the function and record each set of each pseudo-register.

   This is called once, at the start of the gcse pass.  See the comments for
   `reg_set_table' for further documentation.  */

static void
compute_sets (void)
{
  basic_block bb;
  rtx insn;

  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        note_stores (PATTERN (insn), record_set_info, insn);
}
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      return can_assign_to_reg_p (x);
    }
}
/* Used internally by can_assign_to_reg_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register.  */

static bool
can_assign_to_reg_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
          && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (rtx x, rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < INSN_CUID (insn);
        else
          return info->first_set >= INSN_CUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
                                  x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction;
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED,
                          void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
        gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
                       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the CUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly MEM then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
          || (! avail_p
              && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
        {
          list_entry = XEXP (list_entry, 1);
          continue;
        }

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (CALL_P (setter))
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.

         The note_stores interface is pretty limited, so we have to
         communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
        return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
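
/* Hedged usage sketch, following the comment above: checking an entire
   block amounts to passing max_uid + 1 and AVAIL_P == 0.  This wrapper
   is hypothetical and not part of this file.  */

static int
load_killed_anywhere_in_block_p (basic_block bb, rtx x)
{
  return load_killed_in_block_p (bb, max_uid + 1, x, 0);
}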
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (rtx x, rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (rtx x, rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p,
           int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
                   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (rtx x, rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
                      int avail_p, struct hash_table *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = gcse_alloc (sizeof (struct expr));
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
        antic_occr = NULL;

      if (antic_occr)
        /* Found another instance of the expression in the same basic block.
           Prefer the currently recorded one.  We want the first one in the
           block and the block is scanned from start to end.  */
        ; /* nothing to do */
      else
        {
          /* First occurrence of this expression in this basic block.  */
          antic_occr = gcse_alloc (sizeof (struct occr));
          bytes_used += sizeof (struct occr);
          antic_occr->insn = insn;
          antic_occr->next = cur_expr->antic_occr;
          antic_occr->deleted_p = 0;
          cur_expr->antic_occr = antic_occr;
        }
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
        {
          /* Found another instance of the expression in the same basic block.
             Prefer this occurrence to the currently recorded one.  We want
             the last one in the block and the block is scanned from start
             to end.  */
          avail_occr->insn = insn;
        }
      else
        {
          /* First occurrence of this expression in this basic block.  */
          avail_occr = gcse_alloc (sizeof (struct occr));
          bytes_used += sizeof (struct occr);
          avail_occr->insn = insn;
          avail_occr->next = cur_expr->avail_occr;
          avail_occr->deleted_p = 0;
          cur_expr->avail_occr = avail_occr;
        }
    }
}
/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = gcse_alloc (sizeof (struct expr));
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
         We must copy X because it can be modified when copy propagation is
         performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
         Prefer this occurrence to the currently recorded one.  We want
         the last one in the block and the block is scanned from start
         to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = gcse_alloc (sizeof (struct occr));
      bytes_used += sizeof (struct occr);

      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}
/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && GET_CODE (XEXP (x, 0)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return true;

  /* Consider a COMPARE of the same registers to be a constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  return CONSTANT_P (x);
}
1684 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
1685 expression one). */
1687 static void
1688 hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
1690 rtx src = SET_SRC (pat);
1691 rtx dest = SET_DEST (pat);
1692 rtx note;
1694 if (GET_CODE (src) == CALL)
1695 hash_scan_call (src, insn, table);
1697 else if (REG_P (dest))
1699 unsigned int regno = REGNO (dest);
1700 rtx tmp;
1702 /* See if a REG_NOTE shows this equivalent to a simpler expression.
1703 This allows us to do a single GCSE pass and still eliminate
1704 redundant constants, addresses or other expressions that are
1705 constructed with multiple instructions. */
1706 note = find_reg_equal_equiv_note (insn);
1707 if (note != 0
1708 && (table->set_p
1709 ? gcse_constant_p (XEXP (note, 0))
1710 : want_to_gcse_p (XEXP (note, 0))))
1711 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
1713 /* Only record sets of pseudo-regs in the hash table. */
1714 if (! table->set_p
1715 && regno >= FIRST_PSEUDO_REGISTER
1716 /* Don't GCSE something if we can't do a reg/reg copy. */
1717 && can_copy_p (GET_MODE (dest))
1718 /* GCSE commonly inserts instruction after the insn. We can't
1719 do that easily for EH_REGION notes so disable GCSE on these
1720 for now. */
1721 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1722 /* Is SET_SRC something we want to gcse? */
1723 && want_to_gcse_p (src)
1724 /* Don't CSE a nop. */
1725 && ! set_noop_p (pat)
1726 /* Don't GCSE if it has attached REG_EQUIV note.
1727 At this point this only function parameters should have
1728 REG_EQUIV notes and if the argument slot is used somewhere
1729 explicitly, it means address of parameter has been taken,
1730 so we should not extend the lifetime of the pseudo. */
1731 && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1733 /* An expression is not anticipatable if its operands are
1734 modified before this insn or if this is not the only SET in
1735 this insn. The latter condition does not have to mean that
1736 SRC itself is not anticipatable, but we just will not be
1737 able to handle code motion of insns with multiple sets. */
1738 int antic_p = oprs_anticipatable_p (src, insn)
1739 && !multiple_sets (insn);
1740 /* An expression is not available if its operands are
1741 subsequently modified, including this insn. It's also not
1742 available if this is a branch, because we can't insert
1743 a set after the branch. */
1744 int avail_p = (oprs_available_p (src, insn)
1745 && ! JUMP_P (insn));
1747 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
1750 /* Record sets for constant/copy propagation. */
1751 else if (table->set_p
1752 && regno >= FIRST_PSEUDO_REGISTER
1753 && ((REG_P (src)
1754 && REGNO (src) >= FIRST_PSEUDO_REGISTER
1755 && can_copy_p (GET_MODE (dest))
1756 && REGNO (src) != regno)
1757 || gcse_constant_p (src))
1758 /* A copy is not available if its src or dest is subsequently
1759 modified. Here we want to search from INSN+1 on, but
1760 oprs_available_p searches from INSN on. */
1761 && (insn == BB_END (BLOCK_FOR_INSN (insn))
1762 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
1763 && oprs_available_p (pat, tmp))))
1764 insert_set_in_table (pat, insn, table);
1766 /* In case of a store we want to consider the memory value as available
1767 in the REG stored in that memory. This makes it possible to remove
1768 redundant loads that follow stores to the same location. */
1769 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1771 unsigned int regno = REGNO (src);
1773 /* Do not do this for constant/copy propagation. */
1774 if (! table->set_p
1775 /* Only record sets of pseudo-regs in the hash table. */
1776 && regno >= FIRST_PSEUDO_REGISTER
1777 /* Don't GCSE something if we can't do a reg/reg copy. */
1778 && can_copy_p (GET_MODE (src))
1779 /* GCSE commonly inserts instructions after the insn. We can't
1780 do that easily for EH_REGION notes so disable GCSE on these
1781 for now. */
1782 && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1783 /* Is SET_DEST something we want to gcse? */
1784 && want_to_gcse_p (dest)
1785 /* Don't CSE a nop. */
1786 && ! set_noop_p (pat)
1787 /* Don't GCSE if it has attached REG_EQUIV note.
1788 At this point only function parameters should have
1789 REG_EQUIV notes, and if the argument slot is used somewhere
1790 explicitly, it means the address of the parameter has been
1791 taken, so we should not extend the lifetime of the pseudo. */
1792 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1793 || ! MEM_P (XEXP (note, 0))))
1795 /* Stores are never anticipatable. */
1796 int antic_p = 0;
1797 /* An expression is not available if its operands are
1798 subsequently modified, including this insn. It's also not
1799 available if this is a branch, because we can't insert
1800 a set after the branch. */
1801 int avail_p = oprs_available_p (dest, insn)
1802 && ! JUMP_P (insn);
1804 /* Record the memory expression (DEST) in the hash table. */
1805 insert_expr_in_table (dest, GET_MODE (dest), insn,
1806 antic_p, avail_p, table);
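/* Editor's aside: a hedged source-level illustration (hypothetical
   example, not from this file) of the flag_gcse_las case above.  The
   store makes the memory value available in the stored register, so a
   following load from the same location is redundant.  */

#include <stdio.h>

/* Before store-assisted load elimination (-fgcse-las), the second read
   of *p below is a load; after it, the compiler may reuse the value of
   X that was just stored, since the store made *p's value available in
   the register holding X.  */
static int
demo (int *p, int x)
{
  *p = x;        /* Store: value of *p now available in X.  */
  return *p + 1; /* Redundant load; may become "return x + 1;".  */
}

int
main (void)
{
  int cell = 0;
  printf ("%d\n", demo (&cell, 41));   /* Prints 42 either way.  */
  return 0;
}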
1811 static void
1812 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1813 struct hash_table *table ATTRIBUTE_UNUSED)
1815 /* Currently nothing to do. */
1818 static void
1819 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1820 struct hash_table *table ATTRIBUTE_UNUSED)
1822 /* Currently nothing to do. */
1825 /* Process INSN and add hash table entries as appropriate.
1827 Only available expressions that set a single pseudo-reg are recorded.
1829 Single sets in a PARALLEL could be handled, but it's an extra complication
1830 that isn't dealt with right now. The trick is handling the CLOBBERs that
1831 are also in the PARALLEL. Later.
1833 If SET_P is nonzero, this is for the assignment hash table,
1834 otherwise it is for the expression hash table.
1835 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
1836 not record any expressions. */
1838 static void
1839 hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
1841 rtx pat = PATTERN (insn);
1842 int i;
1844 if (in_libcall_block)
1845 return;
1847 /* Pick out the sets of INSN and for other forms of instructions record
1848 what's been modified. */
1850 if (GET_CODE (pat) == SET)
1851 hash_scan_set (pat, insn, table);
1852 else if (GET_CODE (pat) == PARALLEL)
1853 for (i = 0; i < XVECLEN (pat, 0); i++)
1855 rtx x = XVECEXP (pat, 0, i);
1857 if (GET_CODE (x) == SET)
1858 hash_scan_set (x, insn, table);
1859 else if (GET_CODE (x) == CLOBBER)
1860 hash_scan_clobber (x, insn, table);
1861 else if (GET_CODE (x) == CALL)
1862 hash_scan_call (x, insn, table);
1865 else if (GET_CODE (pat) == CLOBBER)
1866 hash_scan_clobber (pat, insn, table);
1867 else if (GET_CODE (pat) == CALL)
1868 hash_scan_call (pat, insn, table);
1871 static void
1872 dump_hash_table (FILE *file, const char *name, struct hash_table *table)
1874 int i;
1875 /* Flattened out table, so it's printed in proper order. */
1876 struct expr **flat_table;
1877 unsigned int *hash_val;
1878 struct expr *expr;
1880 flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
1881 hash_val = xmalloc (table->n_elems * sizeof (unsigned int));
1883 for (i = 0; i < (int) table->size; i++)
1884 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1886 flat_table[expr->bitmap_index] = expr;
1887 hash_val[expr->bitmap_index] = i;
1890 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1891 name, table->size, table->n_elems);
1893 for (i = 0; i < (int) table->n_elems; i++)
1894 if (flat_table[i] != 0)
1896 expr = flat_table[i];
1897 fprintf (file, "Index %d (hash value %d)\n ",
1898 expr->bitmap_index, hash_val[i]);
1899 print_rtl (file, expr->expr);
1900 fprintf (file, "\n");
1903 fprintf (file, "\n");
1905 free (flat_table);
1906 free (hash_val);
1909 /* Record register first/last/block set information for REGNO in INSN.
1911 first_set records the first place in the block where the register
1912 is set and is used to compute "anticipatability".
1914 last_set records the last place in the block where the register
1915 is set and is used to compute "availability".
1917 last_bb records the block for which first_set and last_set are
1918 valid, as a quick test to invalidate them.
1920 reg_set_in_block records whether the register is set in the block
1921 and is used to compute "transparency". */
1923 static void
1924 record_last_reg_set_info (rtx insn, int regno)
1926 struct reg_avail_info *info = &reg_avail_info[regno];
1927 int cuid = INSN_CUID (insn);
1929 info->last_set = cuid;
1930 if (info->last_bb != current_bb)
1932 info->last_bb = current_bb;
1933 info->first_set = cuid;
1934 SET_BIT (reg_set_in_block[current_bb->index], regno);
1939 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1940 Note we store a pair of elements in the list, so they have to be
1941 taken off pairwise. */
1943 static void
1944 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED,
1945 void * v_insn)
1947 rtx dest_addr, insn;
1948 int bb;
1950 while (GET_CODE (dest) == SUBREG
1951 || GET_CODE (dest) == ZERO_EXTRACT
1952 || GET_CODE (dest) == STRICT_LOW_PART)
1953 dest = XEXP (dest, 0);
1955 /* If DEST is not a MEM, then it will not conflict with a load. Note
1956 that function calls are assumed to clobber memory, but are handled
1957 elsewhere. */
1959 if (! MEM_P (dest))
1960 return;
1962 dest_addr = get_addr (XEXP (dest, 0));
1963 dest_addr = canon_rtx (dest_addr);
1964 insn = (rtx) v_insn;
1965 bb = BLOCK_NUM (insn);
1967 canon_modify_mem_list[bb] =
1968 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
1969 canon_modify_mem_list[bb] =
1970 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
1973 /* Record memory modification information for INSN. We do not actually care
1974 about the memory location(s) that are set, or even how they are set (consider
1975 a CALL_INSN). We merely need to record which insns modify memory. */
1977 static void
1978 record_last_mem_set_info (rtx insn)
1980 int bb = BLOCK_NUM (insn);
1982 /* load_killed_in_block_p will handle the case of calls clobbering
1983 everything. */
1984 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
1985 bitmap_set_bit (modify_mem_list_set, bb);
1987 if (CALL_P (insn))
1989 /* Note that traversals of this loop (other than for freeing)
1990 will break after encountering a CALL_INSN. So, there's no
1991 need to insert a pair of items, as canon_list_insert does. */
1992 canon_modify_mem_list[bb] =
1993 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
1994 bitmap_set_bit (blocks_with_calls, bb);
1996 else
1997 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2000 /* Called from compute_hash_table via note_stores to handle one
2001 SET or CLOBBER in an insn. DATA is really the instruction in which
2002 the SET is taking place. */
2004 static void
2005 record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
2007 rtx last_set_insn = (rtx) data;
2009 if (GET_CODE (dest) == SUBREG)
2010 dest = SUBREG_REG (dest);
2012 if (REG_P (dest))
2013 record_last_reg_set_info (last_set_insn, REGNO (dest));
2014 else if (MEM_P (dest)
2015 /* Ignore pushes, they clobber nothing. */
2016 && ! push_operand (dest, GET_MODE (dest)))
2017 record_last_mem_set_info (last_set_insn);
2020 /* Top level function to create an expression or assignment hash table.
2022 Expression entries are placed in the hash table if
2023 - they are of the form (set (pseudo-reg) src),
2024 - src is something we want to perform GCSE on,
2025 - none of the operands are subsequently modified in the block
2027 Assignment entries are placed in the hash table if
2028 - they are of the form (set (pseudo-reg) src),
2029 - src is something we want to perform const/copy propagation on,
2030 - none of the operands or target are subsequently modified in the block
2032 Currently src must be a pseudo-reg or a const_int.
2034 TABLE is the table computed. */
2036 static void
2037 compute_hash_table_work (struct hash_table *table)
2039 unsigned int i;
2041 /* While we compute the hash table we also compute a bit array of which
2042 registers are set in which blocks.
2043 ??? This isn't needed during const/copy propagation, but it's cheap to
2044 compute. Later. */
2045 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2047 /* Re-cache any INSN_LIST nodes we have allocated. */
2048 clear_modify_mem_tables ();
2049 /* Some working arrays used to track first and last set in each block. */
2050 reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2052 for (i = 0; i < max_gcse_regno; ++i)
2053 reg_avail_info[i].last_bb = NULL;
2055 FOR_EACH_BB (current_bb)
2057 rtx insn;
2058 unsigned int regno;
2059 int in_libcall_block;
2061 /* First pass over the instructions records information used to
2062 determine when registers and memory are first and last set.
2063 ??? hard-reg reg_set_in_block computation
2064 could be moved to compute_sets since they currently don't change. */
2066 FOR_BB_INSNS (current_bb, insn)
2068 if (! INSN_P (insn))
2069 continue;
2071 if (CALL_P (insn))
2073 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2074 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2075 record_last_reg_set_info (insn, regno);
2077 mark_call (insn);
2080 note_stores (PATTERN (insn), record_last_set_info, insn);
2083 /* Insert implicit sets in the hash table. */
2084 if (table->set_p
2085 && implicit_sets[current_bb->index] != NULL_RTX)
2086 hash_scan_set (implicit_sets[current_bb->index],
2087 BB_HEAD (current_bb), table);
2089 /* The next pass builds the hash table. */
2090 in_libcall_block = 0;
2091 FOR_BB_INSNS (current_bb, insn)
2092 if (INSN_P (insn))
2094 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2095 in_libcall_block = 1;
2096 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2097 in_libcall_block = 0;
2098 hash_scan_insn (insn, table, in_libcall_block);
2099 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2100 in_libcall_block = 0;
2104 free (reg_avail_info);
2105 reg_avail_info = NULL;
2108 /* Allocate space for the set/expr hash TABLE.
2109 N_INSNS is the number of instructions in the function.
2110 It is used to determine the number of buckets to use.
2111 SET_P determines whether set or expression table will
2112 be created. */
2114 static void
2115 alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
2117 int n;
2119 table->size = n_insns / 4;
2120 if (table->size < 11)
2121 table->size = 11;
2123 /* Attempt to maintain efficient use of hash table.
2124 Making it an odd number is simplest for now.
2125 ??? Later take some measurements. */
2126 table->size |= 1;
2127 n = table->size * sizeof (struct expr *);
2128 table->table = gmalloc (n);
2129 table->set_p = set_p;
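/* Editor's aside: a minimal runnable check (not part of gcse.c) of the
   bucket-count arithmetic above: one quarter of the insn count, clamped
   to at least 11, then forced odd.  */

#include <assert.h>

/* Mirror of the size computation in alloc_hash_table.  */
static int
toy_hash_table_size (int n_insns)
{
  int size = n_insns / 4;
  if (size < 11)
    size = 11;
  /* An odd size helps spread entries across buckets; "| 1" can only
     grow an even size by one.  */
  return size | 1;
}

int
main (void)
{
  assert (toy_hash_table_size (0) == 11);     /* Clamped to minimum.  */
  assert (toy_hash_table_size (100) == 25);   /* 100/4 = 25, already odd.  */
  assert (toy_hash_table_size (104) == 27);   /* 104/4 = 26, forced odd.  */
  return 0;
}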
2132 /* Free things allocated by alloc_hash_table. */
2134 static void
2135 free_hash_table (struct hash_table *table)
2137 free (table->table);
2140 /* Compute the hash TABLE, which is either the set table used for
2141 copy/const propagation or the expression hash table. */
2143 static void
2144 compute_hash_table (struct hash_table *table)
2146 /* Initialize count of number of entries in hash table. */
2147 table->n_elems = 0;
2148 memset (table->table, 0, table->size * sizeof (struct expr *));
2150 compute_hash_table_work (table);
2153 /* Expression tracking support. */
2155 /* Lookup REGNO in the set TABLE. The result is a pointer to the
2156 table entry, or NULL if not found. */
2158 static struct expr *
2159 lookup_set (unsigned int regno, struct hash_table *table)
2161 unsigned int hash = hash_set (regno, table->size);
2162 struct expr *expr;
2164 expr = table->table[hash];
2166 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2167 expr = expr->next_same_hash;
2169 return expr;
2172 /* Return the next entry for REGNO in list EXPR. */
2174 static struct expr *
2175 next_set (unsigned int regno, struct expr *expr)
2177 do
2178 expr = expr->next_same_hash;
2179 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2181 return expr;
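/* Editor's aside: lookup_set and next_set together enumerate every set
   of a register within one hash chain.  A self-contained toy of that
   walk follows (toy_* names are hypothetical stand-ins for the real
   structures).  */

#include <assert.h>
#include <stddef.h>

struct toy_expr
{
  unsigned int dest_regno;          /* REGNO (SET_DEST (expr->expr)).  */
  struct toy_expr *next_same_hash;
};

/* First entry in CHAIN whose destination is REGNO, as lookup_set does
   after hashing REGNO to a bucket.  */
static struct toy_expr *
toy_lookup_set (unsigned int regno, struct toy_expr *chain)
{
  while (chain && chain->dest_regno != regno)
    chain = chain->next_same_hash;
  return chain;
}

/* Next entry for REGNO after EXPR, as next_set does.  */
static struct toy_expr *
toy_next_set (unsigned int regno, struct toy_expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && expr->dest_regno != regno);
  return expr;
}

int
main (void)
{
  struct toy_expr c = { 7, NULL }, b = { 9, &c }, a = { 7, &b };
  struct toy_expr *set = toy_lookup_set (7, &a);

  assert (set == &a);                      /* First set of reg 7.  */
  set = toy_next_set (7, set);
  assert (set == &c);                      /* Skips the set of reg 9.  */
  assert (toy_next_set (7, set) == NULL);  /* Chain exhausted.  */
  return 0;
}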
2184 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2185 types may be mixed. */
2187 static void
2188 free_insn_expr_list_list (rtx *listp)
2190 rtx list, next;
2192 for (list = *listp; list ; list = next)
2194 next = XEXP (list, 1);
2195 if (GET_CODE (list) == EXPR_LIST)
2196 free_EXPR_LIST_node (list);
2197 else
2198 free_INSN_LIST_node (list);
2201 *listp = NULL;
2204 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2205 static void
2206 clear_modify_mem_tables (void)
2208 unsigned i;
2209 bitmap_iterator bi;
2211 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
2213 free_INSN_LIST_list (modify_mem_list + i);
2214 free_insn_expr_list_list (canon_modify_mem_list + i);
2216 bitmap_clear (modify_mem_list_set);
2217 bitmap_clear (blocks_with_calls);
2220 /* Release memory used by modify_mem_list_set. */
2222 static void
2223 free_modify_mem_tables (void)
2225 clear_modify_mem_tables ();
2226 free (modify_mem_list);
2227 free (canon_modify_mem_list);
2228 modify_mem_list = 0;
2229 canon_modify_mem_list = 0;
2232 /* Reset tables used to keep track of what's still available [since the
2233 start of the block]. */
2235 static void
2236 reset_opr_set_tables (void)
2238 /* Maintain a bitmap of which regs have been set since beginning of
2239 the block. */
2240 CLEAR_REG_SET (reg_set_bitmap);
2242 /* Also keep a record of the last instruction to modify memory.
2243 For now this is very trivial; we only record whether any memory
2244 location has been modified. */
2245 clear_modify_mem_tables ();
2248 /* Return nonzero if the operands of X are not set before INSN in
2249 INSN's basic block. */
2251 static int
2252 oprs_not_set_p (rtx x, rtx insn)
2254 int i, j;
2255 enum rtx_code code;
2256 const char *fmt;
2258 if (x == 0)
2259 return 1;
2261 code = GET_CODE (x);
2262 switch (code)
2264 case PC:
2265 case CC0:
2266 case CONST:
2267 case CONST_INT:
2268 case CONST_DOUBLE:
2269 case CONST_VECTOR:
2270 case SYMBOL_REF:
2271 case LABEL_REF:
2272 case ADDR_VEC:
2273 case ADDR_DIFF_VEC:
2274 return 1;
2276 case MEM:
2277 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2278 INSN_CUID (insn), x, 0))
2279 return 0;
2280 else
2281 return oprs_not_set_p (XEXP (x, 0), insn);
2283 case REG:
2284 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2286 default:
2287 break;
2290 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2292 if (fmt[i] == 'e')
2294 /* If we are about to do the last recursive call
2295 needed at this level, change it into iteration.
2296 This function is called enough to be worth it. */
2297 if (i == 0)
2298 return oprs_not_set_p (XEXP (x, i), insn);
2300 if (! oprs_not_set_p (XEXP (x, i), insn))
2301 return 0;
2303 else if (fmt[i] == 'E')
2304 for (j = 0; j < XVECLEN (x, i); j++)
2305 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2306 return 0;
2309 return 1;
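/* Editor's aside: the "turn the last recursive call into iteration"
   idiom above recurs throughout this file.  A generic, self-contained
   sketch of the same transformation on a toy binary tree:  */

#include <assert.h>
#include <stddef.h>

struct toy_node
{
  int flagged;
  struct toy_node *left, *right;
};

/* Return 1 if no node in the tree is flagged.  The left child is
   checked by a genuine recursive call; the right child (the "last
   operand") is handled by looping, just as oprs_not_set_p loops for
   operand 0 instead of recursing.  */
static int
toy_none_flagged (struct toy_node *x)
{
  while (x != NULL)
    {
      if (x->flagged)
	return 0;
      if (! toy_none_flagged (x->left))
	return 0;
      x = x->right;   /* Tail call turned into iteration.  */
    }
  return 1;
}

int
main (void)
{
  struct toy_node leaf = { 1, NULL, NULL };
  struct toy_node ok   = { 0, NULL, NULL };
  struct toy_node root = { 0, &ok, &leaf };

  assert (! toy_none_flagged (&root));  /* Right spine finds the flag.  */
  assert (toy_none_flagged (&ok));
  return 0;
}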
2312 /* Mark things set by a CALL. */
2314 static void
2315 mark_call (rtx insn)
2317 if (! CONST_OR_PURE_CALL_P (insn))
2318 record_last_mem_set_info (insn);
2321 /* Mark things set by a SET. */
2323 static void
2324 mark_set (rtx pat, rtx insn)
2326 rtx dest = SET_DEST (pat);
2328 while (GET_CODE (dest) == SUBREG
2329 || GET_CODE (dest) == ZERO_EXTRACT
2330 || GET_CODE (dest) == STRICT_LOW_PART)
2331 dest = XEXP (dest, 0);
2333 if (REG_P (dest))
2334 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2335 else if (MEM_P (dest))
2336 record_last_mem_set_info (insn);
2338 if (GET_CODE (SET_SRC (pat)) == CALL)
2339 mark_call (insn);
2342 /* Record things set by a CLOBBER. */
2344 static void
2345 mark_clobber (rtx pat, rtx insn)
2347 rtx clob = XEXP (pat, 0);
2349 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2350 clob = XEXP (clob, 0);
2352 if (REG_P (clob))
2353 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2354 else
2355 record_last_mem_set_info (insn);
2358 /* Record things set by INSN.
2359 This data is used by oprs_not_set_p. */
2361 static void
2362 mark_oprs_set (rtx insn)
2364 rtx pat = PATTERN (insn);
2365 int i;
2367 if (GET_CODE (pat) == SET)
2368 mark_set (pat, insn);
2369 else if (GET_CODE (pat) == PARALLEL)
2370 for (i = 0; i < XVECLEN (pat, 0); i++)
2372 rtx x = XVECEXP (pat, 0, i);
2374 if (GET_CODE (x) == SET)
2375 mark_set (x, insn);
2376 else if (GET_CODE (x) == CLOBBER)
2377 mark_clobber (x, insn);
2378 else if (GET_CODE (x) == CALL)
2379 mark_call (insn);
2382 else if (GET_CODE (pat) == CLOBBER)
2383 mark_clobber (pat, insn);
2384 else if (GET_CODE (pat) == CALL)
2385 mark_call (insn);
2389 /* Compute copy/constant propagation working variables. */
2391 /* Local properties of assignments. */
2392 static sbitmap *cprop_pavloc;
2393 static sbitmap *cprop_absaltered;
2395 /* Global properties of assignments (computed from the local properties). */
2396 static sbitmap *cprop_avin;
2397 static sbitmap *cprop_avout;
2399 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2400 basic blocks. N_SETS is the number of sets. */
2402 static void
2403 alloc_cprop_mem (int n_blocks, int n_sets)
2405 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2406 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2408 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2409 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2412 /* Free vars used by copy/const propagation. */
2414 static void
2415 free_cprop_mem (void)
2417 sbitmap_vector_free (cprop_pavloc);
2418 sbitmap_vector_free (cprop_absaltered);
2419 sbitmap_vector_free (cprop_avin);
2420 sbitmap_vector_free (cprop_avout);
2423 /* For each block, compute whether X is transparent. X is either an
2424 expression or an assignment [though we don't care which, for this context
2425 an assignment is treated as an expression]. For each block where an
2426 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2427 bit in BMAP. */
2429 static void
2430 compute_transp (rtx x, int indx, sbitmap *bmap, int set_p)
2432 int i, j;
2433 basic_block bb;
2434 enum rtx_code code;
2435 reg_set *r;
2436 const char *fmt;
2438 /* repeat is used to turn tail-recursion into iteration since GCC
2439 can't do it when there's no return value. */
2440 repeat:
2442 if (x == 0)
2443 return;
2445 code = GET_CODE (x);
2446 switch (code)
2448 case REG:
2449 if (set_p)
2451 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
2453 FOR_EACH_BB (bb)
2454 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
2455 SET_BIT (bmap[bb->index], indx);
2457 else
2459 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
2460 SET_BIT (bmap[r->bb_index], indx);
2463 else
2465 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
2467 FOR_EACH_BB (bb)
2468 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
2469 RESET_BIT (bmap[bb->index], indx);
2471 else
2473 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
2474 RESET_BIT (bmap[r->bb_index], indx);
2478 return;
2480 case MEM:
2481 if (! MEM_READONLY_P (x))
2483 bitmap_iterator bi;
2484 unsigned bb_index;
2486 /* First handle all the blocks with calls. We don't need to
2487 do any list walking for them. */
2488 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2490 if (set_p)
2491 SET_BIT (bmap[bb_index], indx);
2492 else
2493 RESET_BIT (bmap[bb_index], indx);
2496 /* Now iterate over the blocks which have memory modifications
2497 but which do not have any calls. */
2498 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2499 blocks_with_calls,
2500 0, bb_index, bi)
2502 rtx list_entry = canon_modify_mem_list[bb_index];
2504 while (list_entry)
2506 rtx dest, dest_addr;
2508 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2509 Examine each hunk of memory that is modified. */
2511 dest = XEXP (list_entry, 0);
2512 list_entry = XEXP (list_entry, 1);
2513 dest_addr = XEXP (list_entry, 0);
2515 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2516 x, rtx_addr_varies_p))
2518 if (set_p)
2519 SET_BIT (bmap[bb_index], indx);
2520 else
2521 RESET_BIT (bmap[bb_index], indx);
2522 break;
2524 list_entry = XEXP (list_entry, 1);
2529 x = XEXP (x, 0);
2530 goto repeat;
2532 case PC:
2533 case CC0: /*FIXME*/
2534 case CONST:
2535 case CONST_INT:
2536 case CONST_DOUBLE:
2537 case CONST_VECTOR:
2538 case SYMBOL_REF:
2539 case LABEL_REF:
2540 case ADDR_VEC:
2541 case ADDR_DIFF_VEC:
2542 return;
2544 default:
2545 break;
2548 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2550 if (fmt[i] == 'e')
2552 /* If we are about to do the last recursive call
2553 needed at this level, change it into iteration.
2554 This function is called enough to be worth it. */
2555 if (i == 0)
2557 x = XEXP (x, i);
2558 goto repeat;
2561 compute_transp (XEXP (x, i), indx, bmap, set_p);
2563 else if (fmt[i] == 'E')
2564 for (j = 0; j < XVECLEN (x, i); j++)
2565 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2569 /* Top level routine to do the dataflow analysis needed by copy/const
2570 propagation. */
2572 static void
2573 compute_cprop_data (void)
2575 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2576 compute_available (cprop_pavloc, cprop_absaltered,
2577 cprop_avout, cprop_avin);
2580 /* Copy/constant propagation. */
2582 /* Maximum number of register uses in an insn that we handle. */
2583 #define MAX_USES 8
2585 /* Table of uses found in an insn.
2586 Allocated statically to avoid alloc/free complexity and overhead. */
2587 static struct reg_use reg_use_table[MAX_USES];
2589 /* Index into `reg_use_table' while building it. */
2590 static int reg_use_count;
2592 /* Set up a list of register numbers used in INSN. The found uses are stored
2593 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2594 and contains the number of uses in the table upon exit.
2596 ??? If a register appears multiple times we will record it multiple times.
2597 This doesn't hurt anything but it will slow things down. */
2599 static void
2600 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2602 int i, j;
2603 enum rtx_code code;
2604 const char *fmt;
2605 rtx x = *xptr;
2607 /* repeat is used to turn tail-recursion into iteration since GCC
2608 can't do it when there's no return value. */
2609 repeat:
2610 if (x == 0)
2611 return;
2613 code = GET_CODE (x);
2614 if (REG_P (x))
2616 if (reg_use_count == MAX_USES)
2617 return;
2619 reg_use_table[reg_use_count].reg_rtx = x;
2620 reg_use_count++;
2623 /* Recursively scan the operands of this expression. */
2625 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2627 if (fmt[i] == 'e')
2629 /* If we are about to do the last recursive call
2630 needed at this level, change it into iteration.
2631 This function is called enough to be worth it. */
2632 if (i == 0)
2634 x = XEXP (x, 0);
2635 goto repeat;
2638 find_used_regs (&XEXP (x, i), data);
2640 else if (fmt[i] == 'E')
2641 for (j = 0; j < XVECLEN (x, i); j++)
2642 find_used_regs (&XVECEXP (x, i, j), data);
2646 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2647 Returns nonzero if successful. */
2649 static int
2650 try_replace_reg (rtx from, rtx to, rtx insn)
2652 rtx note = find_reg_equal_equiv_note (insn);
2653 rtx src = 0;
2654 int success = 0;
2655 rtx set = single_set (insn);
2657 /* Usually we substitute easy stuff, so we won't copy everything.
2658 We however need to take care to not duplicate non-trivial CONST
2659 expressions. */
2660 to = copy_rtx (to);
2662 validate_replace_src_group (from, to, insn);
2663 if (num_changes_pending () && apply_change_group ())
2664 success = 1;
2666 /* Try to simplify SET_SRC if we have substituted a constant. */
2667 if (success && set && CONSTANT_P (to))
2669 src = simplify_rtx (SET_SRC (set));
2671 if (src)
2672 validate_change (insn, &SET_SRC (set), src, 0);
2675 /* If there is already a REG_EQUAL note, update the expression in it
2676 with our replacement. */
2677 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2678 set_unique_reg_note (insn, REG_EQUAL,
2679 simplify_replace_rtx (XEXP (note, 0), from, to));
2680 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2682 /* If above failed and this is a single set, try to simplify the source of
2683 the set given our substitution. We could perhaps try this for multiple
2684 SETs, but it probably won't buy us anything. */
2685 src = simplify_replace_rtx (SET_SRC (set), from, to);
2687 if (!rtx_equal_p (src, SET_SRC (set))
2688 && validate_change (insn, &SET_SRC (set), src, 0))
2689 success = 1;
2691 /* If we've failed to do replacement, have a single SET, don't already
2692 have a note, and have no special SET, add a REG_EQUAL note to not
2693 lose information. */
2694 if (!success && note == 0 && set != 0
2695 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2696 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2697 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2700 /* A REG_EQUAL note may get simplified into a register.
2701 We don't allow that, so remove the note. This ought
2702 not to happen, because the previous code ought to synthesize
2703 a reg-reg move, but be on the safe side. */
2704 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2705 remove_note (insn, note);
2707 return success;
2710 /* Find a set of register REGNO that is available on entry to INSN's
2711 block. Returns NULL if no such set is found. */
2713 static struct expr *
2714 find_avail_set (int regno, rtx insn)
2716 /* SET1 contains the last set found that can be returned to the caller for
2717 use in a substitution. */
2718 struct expr *set1 = 0;
2720 /* Loops are not possible here. To get a loop we would need two sets
2721 available at the start of the block containing INSN. i.e. we would
2722 need two sets like this available at the start of the block:
2724 (set (reg X) (reg Y))
2725 (set (reg Y) (reg X))
2727 This cannot happen since the set of (reg Y) would have killed the
2728 set of (reg X) making it unavailable at the start of this block. */
2729 while (1)
2731 rtx src;
2732 struct expr *set = lookup_set (regno, &set_hash_table);
2734 /* Find a set that is available at the start of the block
2735 which contains INSN. */
2736 while (set)
2738 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
2739 break;
2740 set = next_set (regno, set);
2743 /* If no available set was found we've reached the end of the
2744 (possibly empty) copy chain. */
2745 if (set == 0)
2746 break;
2748 gcc_assert (GET_CODE (set->expr) == SET);
2750 src = SET_SRC (set->expr);
2752 /* We know the set is available.
2753 Now check that SRC is ANTLOC (i.e. none of the source operands
2754 have changed since the start of the block).
2756 If the source operand changed, we may still use it for the next
2757 iteration of this loop, but we may not use it for substitutions. */
2759 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2760 set1 = set;
2762 /* If the source of the set is anything except a register, then
2763 we have reached the end of the copy chain. */
2764 if (! REG_P (src))
2765 break;
2767 /* Follow the copy chain, i.e. start another iteration of the loop
2768 and see if we have an available copy into SRC. */
2769 regno = REGNO (src);
2772 /* SET1 holds the last set that was available and anticipatable at
2773 INSN. */
2774 return set1;
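/* Editor's aside: a self-contained sketch (hypothetical toy_* names) of
   the copy-chain walk above.  It omits the ANTLOC "operands unchanged"
   check and keeps only the chain-following, which is loop-free for the
   reason given in the comment above.  */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy model: sets[r] describes the available set "(set (reg r) src)".  */
struct toy_set
{
  bool valid;       /* An available set of this register exists.  */
  bool src_is_reg;  /* Source is another register (a copy).  */
  int src;          /* Register number, or constant value.  */
};

/* Follow copies r -> r' -> ... until a constant or a dead end, keeping
   the last set found, as find_avail_set does.  */
static const struct toy_set *
toy_find_avail_set (int regno, const struct toy_set *sets)
{
  const struct toy_set *found = NULL;

  while (sets[regno].valid)
    {
      found = &sets[regno];
      if (! found->src_is_reg)
	break;                 /* Reached a constant.  */
      regno = found->src;      /* Follow the copy chain.  */
    }
  return found;
}

int
main (void)
{
  /* r3 = r2; r2 = r1; r1 = 7.  */
  struct toy_set sets[4] = {
    { false, false, 0 },
    { true,  false, 7 },   /* r1 = 7  */
    { true,  true,  1 },   /* r2 = r1 */
    { true,  true,  2 },   /* r3 = r2 */
  };
  const struct toy_set *set = toy_find_avail_set (3, sets);

  /* Walking r3 -> r2 -> r1 ends at the constant 7.  */
  assert (set && ! set->src_is_reg && set->src == 7);
  return 0;
}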
2777 /* Subroutine of cprop_insn that tries to propagate constants into
2778 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2779 it is the instruction that immediately precedes JUMP, and must be a
2780 single SET of a register. FROM is what we will try to replace,
2781 SRC is the constant we will try to substitute for it. Returns nonzero
2782 if a change was made. */
2784 static int
2785 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2787 rtx new, set_src, note_src;
2788 rtx set = pc_set (jump);
2789 rtx note = find_reg_equal_equiv_note (jump);
2791 if (note)
2793 note_src = XEXP (note, 0);
2794 if (GET_CODE (note_src) == EXPR_LIST)
2795 note_src = NULL_RTX;
2797 else note_src = NULL_RTX;
2799 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2800 set_src = note_src ? note_src : SET_SRC (set);
2802 /* First substitute the SETCC condition into the JUMP instruction,
2803 then substitute that given values into this expanded JUMP. */
2804 if (setcc != NULL_RTX
2805 && !modified_between_p (from, setcc, jump)
2806 && !modified_between_p (src, setcc, jump))
2808 rtx setcc_src;
2809 rtx setcc_set = single_set (setcc);
2810 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2811 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2812 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2813 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2814 setcc_src);
2816 else
2817 setcc = NULL_RTX;
2819 new = simplify_replace_rtx (set_src, from, src);
2821 /* If no simplification can be made, then try the next register. */
2822 if (rtx_equal_p (new, SET_SRC (set)))
2823 return 0;
2825 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
2826 if (new == pc_rtx)
2827 delete_insn (jump);
2828 else
2830 /* Ensure the value computed inside the jump insn is equivalent
2831 to the one computed by setcc. */
2832 if (setcc && modified_in_p (new, setcc))
2833 return 0;
2834 if (! validate_change (jump, &SET_SRC (set), new, 0))
2836 /* When (some) constants are not valid in a comparison, and there
2837 are two registers to be replaced by constants before the entire
2838 comparison can be folded into a constant, we need to keep
2839 intermediate information in REG_EQUAL notes. For targets with
2840 separate compare insns, such notes are added by try_replace_reg.
2841 When we have a combined compare-and-branch instruction, however,
2842 we need to attach a note to the branch itself to make this
2843 optimization work. */
2845 if (!rtx_equal_p (new, note_src))
2846 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
2847 return 0;
2850 /* Remove REG_EQUAL note after simplification. */
2851 if (note_src)
2852 remove_note (jump, note);
2855 #ifdef HAVE_cc0
2856 /* Delete the cc0 setter. */
2857 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2858 delete_insn (setcc);
2859 #endif
2861 run_jump_opt_after_gcse = 1;
2863 global_const_prop_count++;
2864 if (dump_file != NULL)
2866 fprintf (dump_file,
2867 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2868 REGNO (from), INSN_UID (jump));
2869 print_rtl (dump_file, src);
2870 fprintf (dump_file, "\n");
2872 purge_dead_edges (bb);
2874 return 1;
2877 static bool
2878 constprop_register (rtx insn, rtx from, rtx to, bool alter_jumps)
2880 rtx sset;
2882 /* Check for reg or cc0 setting instructions followed by
2883 conditional branch instructions first. */
2884 if (alter_jumps
2885 && (sset = single_set (insn)) != NULL
2886 && NEXT_INSN (insn)
2887 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2889 rtx dest = SET_DEST (sset);
2890 if ((REG_P (dest) || CC0_P (dest))
2891 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2892 return 1;
2895 /* Handle normal insns next. */
2896 if (NONJUMP_INSN_P (insn)
2897 && try_replace_reg (from, to, insn))
2898 return 1;
2900 /* Try to propagate a CONST_INT into a conditional jump.
2901 We're pretty specific about what we will handle in this
2902 code, we can extend this as necessary over time.
2904 Right now the insn in question must look like
2905 (set (pc) (if_then_else ...)) */
2906 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
2907 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2908 return 0;
2911 /* Perform constant and copy propagation on INSN.
2912 The result is nonzero if a change was made. */
2914 static int
2915 cprop_insn (rtx insn, int alter_jumps)
2917 struct reg_use *reg_used;
2918 int changed = 0;
2919 rtx note;
2921 if (!INSN_P (insn))
2922 return 0;
2924 reg_use_count = 0;
2925 note_uses (&PATTERN (insn), find_used_regs, NULL);
2927 note = find_reg_equal_equiv_note (insn);
2929 /* We may win even when propagating constants into notes. */
2930 if (note)
2931 find_used_regs (&XEXP (note, 0), NULL);
2933 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2934 reg_used++, reg_use_count--)
2936 unsigned int regno = REGNO (reg_used->reg_rtx);
2937 rtx pat, src;
2938 struct expr *set;
2940 /* Ignore registers created by GCSE.
2941 We do this because the dataflow information computed above does not cover them. */
2942 if (regno >= max_gcse_regno)
2943 continue;
2945 /* If the register has already been set in this block, there's
2946 nothing we can do. */
2947 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2948 continue;
2950 /* Find an assignment that sets reg_used and is available
2951 at the start of the block. */
2952 set = find_avail_set (regno, insn);
2953 if (! set)
2954 continue;
2956 pat = set->expr;
2957 /* ??? We might be able to handle PARALLELs. Later. */
2958 gcc_assert (GET_CODE (pat) == SET);
2960 src = SET_SRC (pat);
2962 /* Constant propagation. */
2963 if (gcse_constant_p (src))
2965 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
2967 changed = 1;
2968 global_const_prop_count++;
2969 if (dump_file != NULL)
2971 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2972 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2973 print_rtl (dump_file, src);
2974 fprintf (dump_file, "\n");
2976 if (INSN_DELETED_P (insn))
2977 return 1;
2980 else if (REG_P (src)
2981 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2982 && REGNO (src) != regno)
2984 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2986 changed = 1;
2987 global_copy_prop_count++;
2988 if (dump_file != NULL)
2990 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2991 regno, INSN_UID (insn));
2992 fprintf (dump_file, " with reg %d\n", REGNO (src));
2995 /* The original insn setting reg_used may or may not now be
2996 deletable. We leave the deletion to flow. */
2997 /* FIXME: If it turns out that the insn isn't deletable,
2998 then we may have unnecessarily extended register lifetimes
2999 and made things worse. */
3004 return changed;
3007 /* Like find_used_regs, but avoid recording uses that appear in
3008 input-output contexts such as zero_extract or pre_dec. This
3009 restricts the cases we consider to those for which local cprop
3010 can legitimately make replacements. */
3012 static void
3013 local_cprop_find_used_regs (rtx *xptr, void *data)
3015 rtx x = *xptr;
3017 if (x == 0)
3018 return;
3020 switch (GET_CODE (x))
3022 case ZERO_EXTRACT:
3023 case SIGN_EXTRACT:
3024 case STRICT_LOW_PART:
3025 return;
3027 case PRE_DEC:
3028 case PRE_INC:
3029 case POST_DEC:
3030 case POST_INC:
3031 case PRE_MODIFY:
3032 case POST_MODIFY:
3033 /* Can only legitimately appear this early in the context of
3034 stack pushes for function arguments, but handle all of the
3035 codes nonetheless. */
3036 return;
3038 case SUBREG:
3039 /* Setting a subreg of a register larger than word_mode leaves
3040 the non-written words unchanged. */
3041 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
3042 return;
3043 break;
3045 default:
3046 break;
3049 find_used_regs (xptr, data);
3052 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
3053 their REG_EQUAL notes need updating. */
3055 static bool
3056 do_local_cprop (rtx x, rtx insn, bool alter_jumps, rtx *libcall_sp)
3058 rtx newreg = NULL, newcnst = NULL;
3060 /* Rule out USE instructions and ASM statements as we don't want to
3061 change the hard registers mentioned. */
3062 if (REG_P (x)
3063 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
3064 || (GET_CODE (PATTERN (insn)) != USE
3065 && asm_noperands (PATTERN (insn)) < 0)))
3067 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
3068 struct elt_loc_list *l;
3070 if (!val)
3071 return false;
3072 for (l = val->locs; l; l = l->next)
3074 rtx this_rtx = l->loc;
3075 rtx note;
3077 /* Don't CSE non-constant values out of libcall blocks. */
3078 if (l->in_libcall && ! CONSTANT_P (this_rtx))
3079 continue;
3081 if (gcse_constant_p (this_rtx))
3082 newcnst = this_rtx;
3083 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
3084 /* Don't copy propagate if it has attached REG_EQUIV note.
3085 At this point only function parameters should have
3086 REG_EQUIV notes, and if the argument slot is used somewhere
3087 explicitly, it means the address of the parameter has been
3088 taken, so we should not extend the lifetime of the pseudo. */
3089 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
3090 || ! MEM_P (XEXP (note, 0))))
3091 newreg = this_rtx;
3093 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
3095 /* If we find a case where we can't make the retval REG_EQUAL notes
3096 match the new register, we either have to abandon this replacement
3097 or fix delete_trivially_dead_insns to preserve the setting insn,
3098 or make it delete the REG_EQUAL note, and fix up all passes that
3099 require the REG_EQUAL note there. */
3100 bool adjusted;
3102 adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
3103 gcc_assert (adjusted);
3105 if (dump_file != NULL)
3107 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
3108 REGNO (x));
3109 fprintf (dump_file, "insn %d with constant ",
3110 INSN_UID (insn));
3111 print_rtl (dump_file, newcnst);
3112 fprintf (dump_file, "\n");
3114 local_const_prop_count++;
3115 return true;
3117 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
3119 adjust_libcall_notes (x, newreg, insn, libcall_sp);
3120 if (dump_file != NULL)
3122 fprintf (dump_file,
3123 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
3124 REGNO (x), INSN_UID (insn));
3125 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
3127 local_copy_prop_count++;
3128 return true;
3131 return false;
3134 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
3135 their REG_EQUAL notes need updating to reflect that OLDREG has been
3136 replaced with NEWVAL in INSN. Return true if all substitutions could
3137 be made. */
3138 static bool
3139 adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
3141 rtx end;
3143 while ((end = *libcall_sp++))
3145 rtx note = find_reg_equal_equiv_note (end);
3147 if (! note)
3148 continue;
3150 if (REG_P (newval))
3152 if (reg_set_between_p (newval, PREV_INSN (insn), end))
3154 do
3156 note = find_reg_equal_equiv_note (end);
3157 if (! note)
3158 continue;
3159 if (reg_mentioned_p (newval, XEXP (note, 0)))
3160 return false;
3162 while ((end = *libcall_sp++));
3163 return true;
3166 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), oldreg, newval);
3167 df_notes_rescan (end);
3168 insn = end;
3170 return true;
3173 #define MAX_NESTED_LIBCALLS 9
3175 /* Do local const/copy propagation (i.e. within each basic block).
3176 If ALTER_JUMPS is true, allow propagating into jump insns, which
3177 could modify the CFG. */
3179 static void
3180 local_cprop_pass (bool alter_jumps)
3182 basic_block bb;
3183 rtx insn;
3184 struct reg_use *reg_used;
3185 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
3186 bool changed = false;
3188 cselib_init (false);
3189 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
3190 *libcall_sp = 0;
3191 FOR_EACH_BB (bb)
3193 FOR_BB_INSNS (bb, insn)
3195 if (INSN_P (insn))
3197 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3199 if (note)
3201 gcc_assert (libcall_sp != libcall_stack);
3202 *--libcall_sp = XEXP (note, 0);
3204 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3205 if (note)
3206 libcall_sp++;
3207 note = find_reg_equal_equiv_note (insn);
3208 do
3210 reg_use_count = 0;
3211 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
3212 NULL);
3213 if (note)
3214 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
3216 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
3217 reg_used++, reg_use_count--)
3219 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
3220 libcall_sp))
3222 changed = true;
3223 break;
3226 if (INSN_DELETED_P (insn))
3227 break;
3229 while (reg_use_count);
3231 cselib_process_insn (insn);
3234 /* Forget everything at the end of a basic block. Make sure we are
3235 not inside a libcall; libcalls should never cross basic blocks. */
3236 cselib_clear_table ();
3237 gcc_assert (libcall_sp == &libcall_stack[MAX_NESTED_LIBCALLS]);
3240 cselib_finish ();
3242 /* Global analysis may get into infinite loops for unreachable blocks. */
3243 if (changed && alter_jumps)
3245 delete_unreachable_blocks ();
3246 free_reg_set_mem ();
3247 alloc_reg_set_mem (max_reg_num ());
3248 compute_sets ();
3252 /* Forward propagate copies. This includes copies and constants. Return
3253 nonzero if a change was made. */
3255 static int
3256 cprop (int alter_jumps)
3258 int changed;
3259 basic_block bb;
3260 rtx insn;
3262 /* Note we start at block 1. */
3263 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3265 if (dump_file != NULL)
3266 fprintf (dump_file, "\n");
3267 return 0;
3270 changed = 0;
3271 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3273 /* Reset tables used to keep track of what's still valid [since the
3274 start of the block]. */
3275 reset_opr_set_tables ();
3277 FOR_BB_INSNS (bb, insn)
3278 if (INSN_P (insn))
3280 changed |= cprop_insn (insn, alter_jumps);
3282 /* Keep track of everything modified by this insn. */
3283 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
3284 call mark_oprs_set if we turned the insn into a NOTE. */
3285 if (! NOTE_P (insn))
3286 mark_oprs_set (insn);
3290 if (dump_file != NULL)
3291 fprintf (dump_file, "\n");
3293 return changed;
3296 /* Similar to get_condition, only the resulting condition must be
3297 valid at JUMP, instead of at EARLIEST.
3299 This differs from noce_get_condition in ifcvt.c in that we prefer not to
3300 settle for the condition variable in the jump instruction being integral.
3301 We prefer to be able to record the value of a user variable, rather than
3302 the value of a temporary used in a condition. This could be solved by
3303 recording the value of *every* register scanned by canonicalize_condition,
3304 but this would require some code reorganization. */
3306 static rtx
3307 fis_get_condition (rtx jump)
3309 return get_condition (jump, NULL, false, true);
3312 /* Check the comparison COND to see if we can safely form an implicit set from
3313 it. COND is either an EQ or NE comparison. */
3315 static bool
3316 implicit_set_cond_p (rtx cond)
3318 enum machine_mode mode = GET_MODE (XEXP (cond, 0));
3319 rtx cst = XEXP (cond, 1);
3321 /* We can't perform this optimization if either operand might be or might
3322 contain a signed zero. */
3323 if (HONOR_SIGNED_ZEROS (mode))
3325 /* It is sufficient to check if CST is or contains a zero. We must
3326 handle float, complex, and vector. If any subpart is a zero, then
3327 the optimization can't be performed. */
3328 /* ??? The complex and vector checks are not implemented yet. We just
3329 always return zero for them. */
3330 if (GET_CODE (cst) == CONST_DOUBLE)
3332 REAL_VALUE_TYPE d;
3333 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
3334 if (REAL_VALUES_EQUAL (d, dconst0))
3335 return 0;
3337 else
3338 return 0;
3341 return gcse_constant_p (cst);
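/* Editor's aside: the HONOR_SIGNED_ZEROS guard above exists because an
   IEEE comparison cannot distinguish +0.0 from -0.0, so "x == 0.0" does
   not determine x's representation.  A runnable demonstration (compile
   without -ffast-math):  */

#include <assert.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double neg_zero = -0.0;

  /* The comparison treats the two zeros as equal ...  */
  assert (neg_zero == 0.0);

  /* ... yet they are observably different values, so an implicit set
     "x = 0.0" inside "if (x == 0.0)" would be wrong when x is -0.0.  */
  printf ("1.0 / -0.0 = %f\n", 1.0 / neg_zero);   /* -inf, not +inf.  */
  printf ("signbit = %d\n", signbit (neg_zero) != 0);  /* 1, not 0.  */
  return 0;
}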
3344 /* Find the implicit sets of a function. An "implicit set" is a constraint
3345 on the value of a variable, implied by a conditional jump. For example,
3346 following "if (x == 2)", the then branch may be optimized as though the
3347 conditional performed an "explicit set", in this example, "x = 2". This
3348 function records the set patterns that are implicit at the start of each
3349 basic block. */
3351 static void
3352 find_implicit_sets (void)
3354 basic_block bb, dest;
3355 unsigned int count;
3356 rtx cond, new;
3358 count = 0;
3359 FOR_EACH_BB (bb)
3360 /* Check for more than one successor. */
3361 if (EDGE_COUNT (bb->succs) > 1)
3363 cond = fis_get_condition (BB_END (bb));
3365 if (cond
3366 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
3367 && REG_P (XEXP (cond, 0))
3368 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
3369 && implicit_set_cond_p (cond))
3371 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
3372 : FALLTHRU_EDGE (bb)->dest;
3374 if (dest && single_pred_p (dest)
3375 && dest != EXIT_BLOCK_PTR)
3377 new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
3378 XEXP (cond, 1));
3379 implicit_sets[dest->index] = new;
3380 if (dump_file)
3382 fprintf (dump_file, "Implicit set of reg %d in ",
3383 REGNO (XEXP (cond, 0)));
3384 fprintf (dump_file, "basic block %d\n", dest->index);
3386 count++;
3391 if (dump_file)
3392 fprintf (dump_file, "Found %d implicit sets\n", count);
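/* Editor's aside: a hypothetical source-level example of what recording
   an implicit set enables.  Once "x = 2" is recorded at the head of the
   then-block, constant propagation may fold uses of x there.  */

#include <assert.h>

/* As written: the then-branch still reads x.  */
static int
before (int x)
{
  if (x == 2)
    return x * 10;   /* Implicit set: x is known to be 2 here.  */
  return 0;
}

/* What cprop can produce once the implicit set "x = 2" is available at
   the start of the then-block.  */
static int
after (int x)
{
  if (x == 2)
    return 20;       /* x * 10 folded via the implicit set.  */
  return 0;
}

int
main (void)
{
  int x;
  for (x = 0; x < 5; x++)
    assert (before (x) == after (x));
  return 0;
}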
3395 /* Perform one copy/constant propagation pass.
3396 PASS is the pass count. If CPROP_JUMPS is true, perform constant
3397 propagation into conditional jumps. If BYPASS_JUMPS is true,
3398 perform conditional jump bypassing optimizations. */
3400 static int
3401 one_cprop_pass (int pass, bool cprop_jumps, bool bypass_jumps)
3403 int changed = 0;
3405 global_const_prop_count = local_const_prop_count = 0;
3406 global_copy_prop_count = local_copy_prop_count = 0;
3408 if (cprop_jumps)
3409 local_cprop_pass (cprop_jumps);
3411 /* Determine implicit sets. */
3412 implicit_sets = XCNEWVEC (rtx, last_basic_block);
3413 find_implicit_sets ();
3415 alloc_hash_table (max_cuid, &set_hash_table, 1);
3416 compute_hash_table (&set_hash_table);
3418 /* Free implicit_sets before peak usage. */
3419 free (implicit_sets);
3420 implicit_sets = NULL;
3422 if (dump_file)
3423 dump_hash_table (dump_file, "SET", &set_hash_table);
3424 if (set_hash_table.n_elems > 0)
3426 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
3427 compute_cprop_data ();
3428 changed = cprop (cprop_jumps);
3429 if (bypass_jumps)
3430 changed |= bypass_conditional_jumps ();
3431 free_cprop_mem ();
3434 free_hash_table (&set_hash_table);
3436 if (dump_file)
3438 fprintf (dump_file, "CPROP of %s, pass %d: %d bytes needed, ",
3439 current_function_name (), pass, bytes_used);
3440 fprintf (dump_file, "%d local const props, %d local copy props, ",
3441 local_const_prop_count, local_copy_prop_count);
3442 fprintf (dump_file, "%d global const props, %d global copy props\n\n",
3443 global_const_prop_count, global_copy_prop_count);
3445 /* Global analysis may get into infinite loops for unreachable blocks. */
3446 if (changed && cprop_jumps)
3447 delete_unreachable_blocks ();
3449 return changed;
3452 /* Bypass conditional jumps. */
3454 /* The value of last_basic_block at the beginning of the jump_bypass
3455 pass. The use of redirect_edge_and_branch_force may introduce new
3456 basic blocks, but the data flow analysis is only valid for basic
3457 block indices less than bypass_last_basic_block. */
3459 static int bypass_last_basic_block;
3461 /* Find a set of REGNO to a constant that is available at the end of basic
3462 block BB. Returns NULL if no such set is found. Based heavily upon
3463 find_avail_set. */
3465 static struct expr *
3466 find_bypass_set (int regno, int bb)
3468 struct expr *result = 0;
3470 for (;;)
3472 rtx src;
3473 struct expr *set = lookup_set (regno, &set_hash_table);
3475 while (set)
3477 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
3478 break;
3479 set = next_set (regno, set);
3482 if (set == 0)
3483 break;
3485 gcc_assert (GET_CODE (set->expr) == SET);
3487 src = SET_SRC (set->expr);
3488 if (gcse_constant_p (src))
3489 result = set;
3491 if (! REG_P (src))
3492 break;
3494 regno = REGNO (src);
3496 return result;
3500 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3501 any of the instructions inserted on an edge. Jump bypassing places
3502 condition code setters on CFG edges using insert_insn_on_edge. This
3503 function is required to check that our data flow analysis is still
3504 valid prior to commit_edge_insertions. */
3506 static bool
3507 reg_killed_on_edge (rtx reg, edge e)
3509 rtx insn;
3511 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3512 if (INSN_P (insn) && reg_set_p (reg, insn))
3513 return true;
3515 return false;
3518 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3519 basic block BB which has more than one predecessor. If not NULL, SETCC
3520 is the first instruction of BB, which is immediately followed by JUMP_INSN
3521 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3522 Returns nonzero if a change was made.
3524 During the jump bypassing pass, we may place copies of SETCC instructions
3525 on CFG edges. The following routine must be careful to pay attention to
3526 these inserted insns when performing its transformations. */
3528 static int
3529 bypass_block (basic_block bb, rtx setcc, rtx jump)
3531 rtx insn, note;
3532 edge e, edest;
3533 int i, change;
3534 int may_be_loop_header;
3535 unsigned removed_p;
3536 edge_iterator ei;
3538 insn = (setcc != NULL) ? setcc : jump;
3540 /* Determine set of register uses in INSN. */
3541 reg_use_count = 0;
3542 note_uses (&PATTERN (insn), find_used_regs, NULL);
3543 note = find_reg_equal_equiv_note (insn);
3544 if (note)
3545 find_used_regs (&XEXP (note, 0), NULL);
3547 may_be_loop_header = false;
3548 FOR_EACH_EDGE (e, ei, bb->preds)
3549 if (e->flags & EDGE_DFS_BACK)
3551 may_be_loop_header = true;
3552 break;
3555 change = 0;
3556 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3558 removed_p = 0;
3560 if (e->flags & EDGE_COMPLEX)
3562 ei_next (&ei);
3563 continue;
3566 /* We can't redirect edges from new basic blocks. */
3567 if (e->src->index >= bypass_last_basic_block)
3569 ei_next (&ei);
3570 continue;
3573 /* The irreducible loops created by redirecting edges entering the
3574 loop from outside would decrease the effectiveness of some of the
3575 following optimizations, so prevent this. */
3576 if (may_be_loop_header
3577 && !(e->flags & EDGE_DFS_BACK))
3579 ei_next (&ei);
3580 continue;
3583 for (i = 0; i < reg_use_count; i++)
3585 struct reg_use *reg_used = &reg_use_table[i];
3586 unsigned int regno = REGNO (reg_used->reg_rtx);
3587 basic_block dest, old_dest;
3588 struct expr *set;
3589 rtx src, new;
3591 if (regno >= max_gcse_regno)
3592 continue;
3594 set = find_bypass_set (regno, e->src->index);
3596 if (! set)
3597 continue;
3599 /* Check the data flow is valid after edge insertions. */
3600 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3601 continue;
3603 src = SET_SRC (pc_set (jump));
3605 if (setcc != NULL)
3606 src = simplify_replace_rtx (src,
3607 SET_DEST (PATTERN (setcc)),
3608 SET_SRC (PATTERN (setcc)));
3610 new = simplify_replace_rtx (src, reg_used->reg_rtx,
3611 SET_SRC (set->expr));
3613 /* Jump bypassing may have already placed instructions on
3614 edges of the CFG. We can't bypass an outgoing edge that
3615 has instructions associated with it, as these insns won't
3616 get executed if the incoming edge is redirected. */
3618 if (new == pc_rtx)
3620 edest = FALLTHRU_EDGE (bb);
3621 dest = edest->insns.r ? NULL : edest->dest;
3623 else if (GET_CODE (new) == LABEL_REF)
3625 dest = BLOCK_FOR_INSN (XEXP (new, 0));
3626 /* Don't bypass edges containing instructions. */
3627 edest = find_edge (bb, dest);
3628 if (edest && edest->insns.r)
3629 dest = NULL;
3631 else
3632 dest = NULL;
3634 /* Avoid unification of the edge with other edges from original
3635 branch. We would end up emitting the instruction on "both"
3636 edges. */
3638 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3639 && find_edge (e->src, dest))
3640 dest = NULL;
3642 old_dest = e->dest;
3643 if (dest != NULL
3644 && dest != old_dest
3645 && dest != EXIT_BLOCK_PTR)
3647 redirect_edge_and_branch_force (e, dest);
3649 /* Copy the register setter to the redirected edge.
3650 Don't copy CC0 setters, as CC0 is dead after jump. */
3651 if (setcc)
3653 rtx pat = PATTERN (setcc);
3654 if (!CC0_P (SET_DEST (pat)))
3655 insert_insn_on_edge (copy_insn (pat), e);
3658 if (dump_file != NULL)
3660 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3661 "in jump_insn %d equals constant ",
3662 regno, INSN_UID (jump));
3663 print_rtl (dump_file, SET_SRC (set->expr));
3664 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3665 e->src->index, old_dest->index, dest->index);
3667 change = 1;
3668 removed_p = 1;
3669 break;
3672 if (!removed_p)
3673 ei_next (&ei);
3675 return change;
3678 /* Find basic blocks with more than one predecessor that only contain a
3679 single conditional jump. If the result of the comparison is known at
3680 compile-time from any incoming edge, redirect that edge to the
3681 appropriate target. Returns nonzero if a change was made.
3683 This function is now mis-named, because we also handle indirect jumps. */
3685 static int
3686 bypass_conditional_jumps (void)
3688 basic_block bb;
3689 int changed;
3690 rtx setcc;
3691 rtx insn;
3692 rtx dest;
3694 /* Note we start at block 1. */
3695 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3696 return 0;
3698 bypass_last_basic_block = last_basic_block;
3699 mark_dfs_back_edges ();
3701 changed = 0;
3702 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3703 EXIT_BLOCK_PTR, next_bb)
3705 /* Check for more than one predecessor. */
3706 if (!single_pred_p (bb))
3708 setcc = NULL_RTX;
3709 FOR_BB_INSNS (bb, insn)
3710 if (NONJUMP_INSN_P (insn))
3712 if (setcc)
3713 break;
3714 if (GET_CODE (PATTERN (insn)) != SET)
3715 break;
3717 dest = SET_DEST (PATTERN (insn));
3718 if (REG_P (dest) || CC0_P (dest))
3719 setcc = insn;
3720 else
3721 break;
3723 else if (JUMP_P (insn))
3725 if ((any_condjump_p (insn) || computed_jump_p (insn))
3726 && onlyjump_p (insn))
3727 changed |= bypass_block (bb, setcc, insn);
3728 break;
3730 else if (INSN_P (insn))
3731 break;
3735 /* If we bypassed any register setting insns, we inserted a
3736 copy on the redirected edge. These need to be committed. */
3737 if (changed)
3738 commit_edge_insertions ();
3740 return changed;
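/* Editor's aside: a hypothetical source-level sketch of the effect of
   jump bypassing.  The join block consists of a single conditional jump
   on x, and every predecessor gives x a known constant, so each incoming
   edge can be redirected to the branch target it implies.  */

#include <assert.h>

/* Before: both predecessors funnel into one conditional jump on x.  */
static int
before (int p)
{
  int x;
  if (p)
    x = 0;
  else
    x = 1;
  /* Join block: only a conditional jump on x, whose outcome is already
     known on each incoming edge.  */
  if (x)
    return 100;
  return 200;
}

/* After bypassing: each edge jumps directly to the proper target, and
   the join block's test is never reached from these edges.  */
static int
after (int p)
{
  if (p)
    return 200;   /* x would be 0: the x != 0 arm is dead here.  */
  return 100;     /* x would be 1.  */
}

int
main (void)
{
  assert (before (0) == after (0));
  assert (before (1) == after (1));
  return 0;
}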
3743 /* Compute PRE+LCM working variables. */
3745 /* Local properties of expressions. */
3746 /* Nonzero for expressions that are transparent in the block. */
3747 static sbitmap *transp;
3749 /* Nonzero for expressions that are transparent at the end of the block.
3750 This is only zero for expressions killed by an abnormal critical
3751 edge created by a call. */
3752 static sbitmap *transpout;
3754 /* Nonzero for expressions that are computed (available) in the block. */
3755 static sbitmap *comp;
3757 /* Nonzero for expressions that are locally anticipatable in the block. */
3758 static sbitmap *antloc;
3760 /* Nonzero for expressions where this block is an optimal computation
3761 point. */
3762 static sbitmap *pre_optimal;
3764 /* Nonzero for expressions which are redundant in a particular block. */
3765 static sbitmap *pre_redundant;
3767 /* Nonzero for expressions which should be inserted on a specific edge. */
3768 static sbitmap *pre_insert_map;
3770 /* Nonzero for expressions which should be deleted in a specific block. */
3771 static sbitmap *pre_delete_map;
3773 /* Contains the edge_list returned by pre_edge_lcm. */
3774 static struct edge_list *edge_list;
3776 /* Redundant insns. */
3777 static sbitmap pre_redundant_insns;
3779 /* Allocate vars used for PRE analysis. */
3781 static void
3782 alloc_pre_mem (int n_blocks, int n_exprs)
3784 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3785 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3786 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3788 pre_optimal = NULL;
3789 pre_redundant = NULL;
3790 pre_insert_map = NULL;
3791 pre_delete_map = NULL;
3792 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3794 /* pre_insert and pre_delete are allocated later. */
3797 /* Free vars used for PRE analysis. */
3799 static void
3800 free_pre_mem (void)
3802 sbitmap_vector_free (transp);
3803 sbitmap_vector_free (comp);
3805 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3807 if (pre_optimal)
3808 sbitmap_vector_free (pre_optimal);
3809 if (pre_redundant)
3810 sbitmap_vector_free (pre_redundant);
3811 if (pre_insert_map)
3812 sbitmap_vector_free (pre_insert_map);
3813 if (pre_delete_map)
3814 sbitmap_vector_free (pre_delete_map);
3816 transp = comp = NULL;
3817 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3820 /* Top level routine to do the dataflow analysis needed by PRE. */
3822 static void
3823 compute_pre_data (void)
3825 sbitmap trapping_expr;
3826 basic_block bb;
3827 unsigned int ui;
3829 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3830 sbitmap_vector_zero (ae_kill, last_basic_block);
3832 /* Collect expressions which might trap. */
3833 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3834 sbitmap_zero (trapping_expr);
3835 for (ui = 0; ui < expr_hash_table.size; ui++)
3837 struct expr *e;
3838 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3839 if (may_trap_p (e->expr))
3840 SET_BIT (trapping_expr, e->bitmap_index);
3843 /* Compute ae_kill for each basic block using:
3845 ~(TRANSP | COMP) */
3848 FOR_EACH_BB (bb)
3850 edge e;
3851 edge_iterator ei;
3853 /* If the current block is the destination of an abnormal edge, we
3854 kill all trapping expressions because we won't be able to properly
3855 place the instruction on the edge. So make them neither
3856 anticipatable nor transparent. This is fairly conservative. */
3857 FOR_EACH_EDGE (e, ei, bb->preds)
3858 if (e->flags & EDGE_ABNORMAL)
3860 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3861 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3862 break;
3865 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3866 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3869 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3870 ae_kill, &pre_insert_map, &pre_delete_map);
3871 sbitmap_vector_free (antloc);
3872 antloc = NULL;
3873 sbitmap_vector_free (ae_kill);
3874 ae_kill = NULL;
3875 sbitmap_free (trapping_expr);
3878 /* PRE utilities */
3880 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3881 block BB.
3883 VISITED is a pointer to a working buffer for tracking which BB's have
3884 been visited. It is NULL for the top-level call.
3886 We treat reaching expressions that go through blocks containing the same
3887 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3888 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3889 2 as not reaching. The intent is to improve the probability of finding
3890 only one reaching expression and to reduce register lifetimes by picking
3891 the closest such expression. */
3893 static int
3894 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3896 edge pred;
3897 edge_iterator ei;
3899 FOR_EACH_EDGE (pred, ei, bb->preds)
3901 basic_block pred_bb = pred->src;
3903 if (pred->src == ENTRY_BLOCK_PTR
3904 /* Has this predecessor already been visited? */
3905 || visited[pred_bb->index])
3906 ;/* Nothing to do. */
3908 /* Does this predecessor generate this expression? */
3909 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3911 /* Is this the occurrence we're looking for?
3912 Note that there's only one generating occurrence per block
3913 so we just need to check the block number. */
3914 if (occr_bb == pred_bb)
3915 return 1;
3917 visited[pred_bb->index] = 1;
3919 /* Ignore this predecessor if it kills the expression. */
3920 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3921 visited[pred_bb->index] = 1;
3923 /* Neither gen nor kill. */
3924 else
3926 visited[pred_bb->index] = 1;
3927 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3928 return 1;
3932 /* All paths have been checked. */
3933 return 0;
3936 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
3937 memory allocated for that function is returned. */
3939 static int
3940 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3942 int rval;
3943 char *visited = XCNEWVEC (char, last_basic_block);
3945 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3947 free (visited);
3948 return rval;
3952 /* Given an expr, generate RTL which we can insert at the end of a BB,
3953 or on an edge. Set the block number of any insns generated to
3954 the value of BB. */
3956 static rtx
3957 process_insert_insn (struct expr *expr)
3959 rtx reg = expr->reaching_reg;
3960 rtx exp = copy_rtx (expr->expr);
3961 rtx pat;
3963 start_sequence ();
3965 /* If the expression is something that's an operand, like a constant,
3966 just copy it to a register. */
3967 if (general_operand (exp, GET_MODE (reg)))
3968 emit_move_insn (reg, exp);
3970 /* Otherwise, make a new insn to compute this expression and make sure the
3971 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3972 expression to make sure we don't have any sharing issues. */
3973 else
3975 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3977 if (insn_invalid_p (insn))
3978 gcc_unreachable ();
3982 pat = get_insns ();
3983 end_sequence ();
3985 return pat;
3988 /* Add EXPR to the end of basic block BB.
3990 This is used by both PRE and code hoisting.
3992 For PRE, we want to verify that the expr is either transparent
3993 or locally anticipatable in the target block. This check makes
3994 no sense for code hoisting. */
3996 static void
3997 insert_insn_end_basic_block (struct expr *expr, basic_block bb, int pre)
3999 rtx insn = BB_END (bb);
4000 rtx new_insn;
4001 rtx reg = expr->reaching_reg;
4002 int regno = REGNO (reg);
4003 rtx pat, pat_end;
4005 pat = process_insert_insn (expr);
4006 gcc_assert (pat && INSN_P (pat));
4008 pat_end = pat;
4009 while (NEXT_INSN (pat_end) != NULL_RTX)
4010 pat_end = NEXT_INSN (pat_end);
4012 /* If the last insn is a jump, insert EXPR in front [taking care to
4013 handle cc0, etc. properly]. Similarly we need to take care of trapping
4014 instructions in the presence of non-call exceptions. */
4016 if (JUMP_P (insn)
4017 || (NONJUMP_INSN_P (insn)
4018 && (!single_succ_p (bb)
4019 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
4021 #ifdef HAVE_cc0
4022 rtx note;
4023 #endif
4024 /* It should always be the case that we can put these instructions
4025 anywhere in the basic block when performing PRE optimizations.
4026 Check this. */
4027 gcc_assert (!NONJUMP_INSN_P (insn) || !pre
4028 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4029 || TEST_BIT (transp[bb->index], expr->bitmap_index));
4031 /* If this is a jump table, then we can't insert stuff here. Since
4032 we know the previous real insn must be the tablejump, we insert
4033 the new instruction just before the tablejump. */
4034 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4035 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4036 insn = prev_real_insn (insn);
4038 #ifdef HAVE_cc0
4039 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4040 if cc0 isn't set. */
4041 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4042 if (note)
4043 insn = XEXP (note, 0);
4044 else
4046 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4047 if (maybe_cc0_setter
4048 && INSN_P (maybe_cc0_setter)
4049 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4050 insn = maybe_cc0_setter;
4052 #endif
4053 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4054 new_insn = emit_insn_before_noloc (pat, insn, bb);
4057 /* Likewise if the last insn is a call, as will happen in the presence
4058 of exception handling. */
4059 else if (CALL_P (insn)
4060 && (!single_succ_p (bb)
4061 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
4063 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4064 we search backward and place the instructions before the first
4065 parameter is loaded. Do this on all targets for consistency, and on the
4066 presumption that we'll get better code elsewhere as well.
4068 It should always be the case that we can put these instructions
4069 anywhere in the basic block when performing PRE optimizations.
4070 Check this. */
4072 gcc_assert (!pre
4073 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4074 || TEST_BIT (transp[bb->index], expr->bitmap_index));
4076 /* Since different machines initialize their parameter registers
4077 in different orders, assume nothing. Collect the set of all
4078 parameter registers. */
4079 insn = find_first_parameter_load (insn, BB_HEAD (bb));
4081 /* If we found all the parameter loads, then we want to insert
4082 before the first parameter load.
4084 If we did not find all the parameter loads, then we might have
4085 stopped on the head of the block, which could be a CODE_LABEL.
4086 If we inserted before the CODE_LABEL, then we would be putting
4087 the insn in the wrong basic block. In that case, put the insn
4088 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4089 while (LABEL_P (insn)
4090 || NOTE_INSN_BASIC_BLOCK_P (insn))
4091 insn = NEXT_INSN (insn);
4093 new_insn = emit_insn_before_noloc (pat, insn, bb);
4095 else
4096 new_insn = emit_insn_after_noloc (pat, insn, bb);
4098 while (1)
4100 if (INSN_P (pat))
4102 add_label_notes (PATTERN (pat), new_insn);
4103 note_stores (PATTERN (pat), record_set_info, pat);
4105 if (pat == pat_end)
4106 break;
4107 pat = NEXT_INSN (pat);
4110 gcse_create_count++;
4112 if (dump_file)
4114 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
4115 bb->index, INSN_UID (new_insn));
4116 fprintf (dump_file, "copying expression %d to reg %d\n",
4117 expr->bitmap_index, regno);
4121 /* Insert partially redundant expressions on edges in the CFG to make
4122 the expressions fully redundant. */
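/* The classic source-level picture (a hand-written sketch):

     if (c)
       x = a + b;
     y = a + b;

   The second computation of "a + b" is partially redundant: it is
   redundant along the path through the if-arm, but not along the
   other.  Inserting "a + b" on the edge that skips the if-arm makes
   the second computation fully redundant, so pre_delete can replace
   it with a copy from the pseudo holding the expression's value.  */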
4124 static int
4125 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
4127 int e, i, j, num_edges, set_size, did_insert = 0;
4128 sbitmap *inserted;
4130 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4131 if it reaches any of the deleted expressions. */
4133 set_size = pre_insert_map[0]->size;
4134 num_edges = NUM_EDGES (edge_list);
4135 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
4136 sbitmap_vector_zero (inserted, num_edges);
4138 for (e = 0; e < num_edges; e++)
4140 int indx;
4141 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4143 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4145 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4147 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
4148 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4150 struct expr *expr = index_map[j];
4151 struct occr *occr;
4153 /* Now look at each deleted occurrence of this expression. */
4154 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4156 if (! occr->deleted_p)
4157 continue;
4159 /* Insert this expression on this edge if it would
4160 reach the deleted occurrence in BB. */
4161 if (!TEST_BIT (inserted[e], j))
4163 rtx insn;
4164 edge eg = INDEX_EDGE (edge_list, e);
4166 /* We can't insert anything on an abnormal and
4167 critical edge, so we insert the insn at the end of
4168 the previous block. There are several alternatives
4169 detailed in Morgan's book P277 (sec 10.5) for
4170 handling this situation. This one is easiest for
4171 now. */
4173 if (eg->flags & EDGE_ABNORMAL)
4174 insert_insn_end_basic_block (index_map[j], bb, 0);
4175 else
4177 insn = process_insert_insn (index_map[j]);
4178 insert_insn_on_edge (insn, eg);
4181 if (dump_file)
4183 fprintf (dump_file, "PRE/HOIST: edge (%d,%d), ",
4184 bb->index,
4185 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4186 fprintf (dump_file, "copy expression %d\n",
4187 expr->bitmap_index);
4190 update_ld_motion_stores (expr);
4191 SET_BIT (inserted[e], j);
4192 did_insert = 1;
4193 gcse_create_count++;
4200 sbitmap_vector_free (inserted);
4201 return did_insert;
4204 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
4205 Given "old_reg <- expr" (INSN), instead of adding after it
4206 reaching_reg <- old_reg
4207 it's better to do the following:
4208 reaching_reg <- expr
4209 old_reg <- reaching_reg
4210 because this way copy propagation can discover additional PRE
4211 opportunities. But if this fails, we try the old way.
4212 When "expr" is a store, i.e.
4213 given "MEM <- old_reg", instead of adding after it
4214 reaching_reg <- old_reg
4215 it's better to add it before as follows:
4216 reaching_reg <- old_reg
4217 MEM <- reaching_reg. */
4219 static void
4220 pre_insert_copy_insn (struct expr *expr, rtx insn)
4222 rtx reg = expr->reaching_reg;
4223 int regno = REGNO (reg);
4224 int indx = expr->bitmap_index;
4225 rtx pat = PATTERN (insn);
4226 rtx set, first_set, new_insn;
4227 rtx old_reg;
4228 int i;
4230 /* This block matches the logic in hash_scan_insn. */
4231 switch (GET_CODE (pat))
4233 case SET:
4234 set = pat;
4235 break;
4237 case PARALLEL:
4238 /* Search through the parallel looking for the set whose
4239 source was the expression that we're interested in. */
4240 first_set = NULL_RTX;
4241 set = NULL_RTX;
4242 for (i = 0; i < XVECLEN (pat, 0); i++)
4244 rtx x = XVECEXP (pat, 0, i);
4245 if (GET_CODE (x) == SET)
4247 /* If the source was a REG_EQUAL or REG_EQUIV note, we
4248 may not find an equivalent expression, but in this
4249 case the PARALLEL will have a single set. */
4250 if (first_set == NULL_RTX)
4251 first_set = x;
4252 if (expr_equiv_p (SET_SRC (x), expr->expr))
4254 set = x;
4255 break;
4260 gcc_assert (first_set);
4261 if (set == NULL_RTX)
4262 set = first_set;
4263 break;
4265 default:
4266 gcc_unreachable ();
4269 if (REG_P (SET_DEST (set)))
4271 old_reg = SET_DEST (set);
4272 /* Check if we can modify the set destination in the original insn. */
4273 if (validate_change (insn, &SET_DEST (set), reg, 0))
4275 new_insn = gen_move_insn (old_reg, reg);
4276 new_insn = emit_insn_after (new_insn, insn);
4278 /* Keep register set table up to date. */
4279 record_one_set (regno, insn);
4281 else
4283 new_insn = gen_move_insn (reg, old_reg);
4284 new_insn = emit_insn_after (new_insn, insn);
4286 /* Keep register set table up to date. */
4287 record_one_set (regno, new_insn);
4290 else /* This is possible only in case of a store to memory. */
4292 old_reg = SET_SRC (set);
4293 new_insn = gen_move_insn (reg, old_reg);
4295 /* Check if we can modify the set source in the original insn. */
4296 if (validate_change (insn, &SET_SRC (set), reg, 0))
4297 new_insn = emit_insn_before (new_insn, insn);
4298 else
4299 new_insn = emit_insn_after (new_insn, insn);
4301 /* Keep register set table up to date. */
4302 record_one_set (regno, new_insn);
4305 gcse_create_count++;
4307 if (dump_file)
4308 fprintf (dump_file,
4309 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4310 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4311 INSN_UID (insn), regno);
4314 /* Copy available expressions that reach the redundant expression
4315 to `reaching_reg'. */
4317 static void
4318 pre_insert_copies (void)
4320 unsigned int i, added_copy;
4321 struct expr *expr;
4322 struct occr *occr;
4323 struct occr *avail;
4325 /* For each available expression in the table, copy the result to
4326 `reaching_reg' if the expression reaches a deleted one.
4328 ??? The current algorithm is rather brute force.
4329 Need to do some profiling. */
4331 for (i = 0; i < expr_hash_table.size; i++)
4332 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4334 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4335 we don't want to insert a copy here because the expression may not
4336 really be redundant. So only insert an insn if the expression was
4337 deleted. This test also avoids further processing if the
4338 expression wasn't deleted anywhere. */
4339 if (expr->reaching_reg == NULL)
4340 continue;
4342 /* Set when we add a copy for that expression. */
4343 added_copy = 0;
4345 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4347 if (! occr->deleted_p)
4348 continue;
4350 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4352 rtx insn = avail->insn;
4354 /* No need to handle this one if handled already. */
4355 if (avail->copied_p)
4356 continue;
4358 /* Don't handle this one if it's a redundant one. */
4359 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4360 continue;
4362 /* Or if the expression doesn't reach the deleted one. */
4363 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4364 expr,
4365 BLOCK_FOR_INSN (occr->insn)))
4366 continue;
4368 added_copy = 1;
4370 /* Copy the result of avail to reaching_reg. */
4371 pre_insert_copy_insn (expr, insn);
4372 avail->copied_p = 1;
4376 if (added_copy)
4377 update_ld_motion_stores (expr);
4381 /* Emit a move from SRC to DEST, noting the equivalence with the expression
4382 computed in INSN. */
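/* For example (register numbers invented for illustration): if INSN is

     (set (reg 60) (plus (reg 61) (reg 62)))

   and the copy emitted after it is "(set (reg 60) (reg 80))", the copy
   gets a REG_EQUAL note recording "(plus (reg 61) (reg 62))", so the
   later CSE pass still knows which value DEST holds.  */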
4383 static rtx
4384 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
4386 rtx new;
4387 rtx set = single_set (insn), set2;
4388 rtx note;
4389 rtx eqv;
4391 /* This should never fail since we're creating a reg->reg copy
4392 we've verified to be valid. */
4394 new = emit_insn_after (gen_move_insn (dest, src), insn);
4396 /* Note the equivalence for local CSE pass. */
4397 set2 = single_set (new);
4398 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
4399 return new;
4400 if ((note = find_reg_equal_equiv_note (insn)))
4401 eqv = XEXP (note, 0);
4402 else
4403 eqv = SET_SRC (set);
4405 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
4407 return new;
4410 /* Delete redundant computations.
4411 Deletion is done by changing the insn to copy the `reaching_reg' of
4412 the expression into the result of the SET. It is left to later passes
4413 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4415 Returns nonzero if a change is made. */
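/* For instance (a hand-written sketch): a computation

     x = a + b;      known to be fully redundant here

   is replaced by a copy "x = t", where "t" is expr->reaching_reg, the
   pseudo that holds the expression's value on every path reaching this
   point; the copy itself may then be propagated or eliminated by the
   passes named above.  */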
4417 static int
4418 pre_delete (void)
4420 unsigned int i;
4421 int changed;
4422 struct expr *expr;
4423 struct occr *occr;
4425 changed = 0;
4426 for (i = 0; i < expr_hash_table.size; i++)
4427 for (expr = expr_hash_table.table[i];
4428 expr != NULL;
4429 expr = expr->next_same_hash)
4431 int indx = expr->bitmap_index;
4433 /* We only need to search antic_occr since we require
4434 ANTLOC != 0. */
4436 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4438 rtx insn = occr->insn;
4439 rtx set;
4440 basic_block bb = BLOCK_FOR_INSN (insn);
4442 /* We only delete insns that have a single_set. */
4443 if (TEST_BIT (pre_delete_map[bb->index], indx)
4444 && (set = single_set (insn)) != 0
4445 && dbg_cnt (pre_insn))
4447 /* Create a pseudo-reg to store the result of reaching
4448 expressions into. Get the mode for the new pseudo from
4449 the mode of the original destination pseudo. */
4450 if (expr->reaching_reg == NULL)
4451 expr->reaching_reg
4452 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4454 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4455 delete_insn (insn);
4456 occr->deleted_p = 1;
4457 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4458 changed = 1;
4459 gcse_subst_count++;
4461 if (dump_file)
4463 fprintf (dump_file,
4464 "PRE: redundant insn %d (expression %d) in ",
4465 INSN_UID (insn), indx);
4466 fprintf (dump_file, "bb %d, reaching reg is %d\n",
4467 bb->index, REGNO (expr->reaching_reg));
4473 return changed;
4476 /* Perform GCSE optimizations using PRE.
4477 This is called by one_pre_gcse_pass after all the dataflow analysis
4478 has been done.
4480 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4481 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4482 Compiler Design and Implementation.
4484 ??? A new pseudo reg is created to hold the reaching expression. The nice
4485 thing about the classical approach is that it would try to use an existing
4486 reg. If the register can't be adequately optimized [i.e. we introduce
4487 reload problems], one could add a pass here to propagate the new register
4488 through the block.
4490 ??? We don't handle single sets in PARALLELs because we're [currently] not
4491 able to copy the rest of the parallel when we insert copies to create full
4492 redundancies from partial redundancies. However, there's no reason why we
4493 can't handle PARALLELs in the cases where there are no partial
4494 redundancies. */
4496 static int
4497 pre_gcse (void)
4499 unsigned int i;
4500 int did_insert, changed;
4501 struct expr **index_map;
4502 struct expr *expr;
4504 /* Compute a mapping from expression number (`bitmap_index') to
4505 hash table entry. */
4507 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4508 for (i = 0; i < expr_hash_table.size; i++)
4509 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4510 index_map[expr->bitmap_index] = expr;
4512 /* Reset bitmap used to track which insns are redundant. */
4513 pre_redundant_insns = sbitmap_alloc (max_cuid);
4514 sbitmap_zero (pre_redundant_insns);
4516 /* Delete the redundant insns first so that
4517 - we know what register to use for the new insns and for the other
4518 ones with reaching expressions
4519 - we know which insns are redundant when we go to create copies */
4521 changed = pre_delete ();
4522 did_insert = pre_edge_insert (edge_list, index_map);
4524 /* In other places with reaching expressions, copy the expression to the
4525 specially allocated pseudo-reg that reaches the redundant expr. */
4526 pre_insert_copies ();
4527 if (did_insert)
4529 commit_edge_insertions ();
4530 changed = 1;
4533 free (index_map);
4534 sbitmap_free (pre_redundant_insns);
4535 return changed;
4538 /* Top level routine to perform one PRE GCSE pass.
4540 Return nonzero if a change was made. */
4542 static int
4543 one_pre_gcse_pass (int pass)
4545 int changed = 0;
4547 gcse_subst_count = 0;
4548 gcse_create_count = 0;
4550 alloc_hash_table (max_cuid, &expr_hash_table, 0);
4551 add_noreturn_fake_exit_edges ();
4552 if (flag_gcse_lm)
4553 compute_ld_motion_mems ();
4555 compute_hash_table (&expr_hash_table);
4556 trim_ld_motion_mems ();
4557 if (dump_file)
4558 dump_hash_table (dump_file, "Expression", &expr_hash_table);
4560 if (expr_hash_table.n_elems > 0)
4562 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4563 compute_pre_data ();
4564 changed |= pre_gcse ();
4565 free_edge_list (edge_list);
4566 free_pre_mem ();
4569 free_ldst_mems ();
4570 remove_fake_exit_edges ();
4571 free_hash_table (&expr_hash_table);
4573 if (dump_file)
4575 fprintf (dump_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
4576 current_function_name (), pass, bytes_used);
4577 fprintf (dump_file, "%d substs, %d insns created\n",
4578 gcse_subst_count, gcse_create_count);
4581 return changed;
4584 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
4585 If notes are added to an insn which references a CODE_LABEL, the
4586 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
4587 because the following loop optimization pass requires them. */
4589 /* ??? If there was a jump optimization pass after gcse and before loop,
4590 then we would not need to do this here, because jump would add the
4591 necessary REG_LABEL notes. */
4593 static void
4594 add_label_notes (rtx x, rtx insn)
4596 enum rtx_code code = GET_CODE (x);
4597 int i, j;
4598 const char *fmt;
4600 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4602 /* This code used to ignore labels that referred to dispatch tables to
4603 avoid flow generating (slightly) worse code.
4605 We no longer ignore such label references (see LABEL_REF handling in
4606 mark_jump_label for additional information). */
4608 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
4609 REG_NOTES (insn));
4610 if (LABEL_P (XEXP (x, 0)))
4611 LABEL_NUSES (XEXP (x, 0))++;
4612 return;
4615 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4617 if (fmt[i] == 'e')
4618 add_label_notes (XEXP (x, i), insn);
4619 else if (fmt[i] == 'E')
4620 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4621 add_label_notes (XVECEXP (x, i, j), insn);
4625 /* Compute transparent outgoing information for each block.
4627 An expression is transparent to an edge unless it is killed by
4628 the edge itself. This can only happen with abnormal control flow,
4629 when the edge is traversed through a call. This happens with
4630 non-local labels and exceptions.
4632 This would not be necessary if we split the edge. While this is
4633 normally impossible for abnormal critical edges, with some effort
4634 it should be possible with exception handling, since we still have
4635 control over which handler should be invoked. But due to increased
4636 EH table sizes, this may not be worthwhile. */
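/* E.g. (a hand-written sketch): in a block ending in

     foo ();

   a MEM expression such as "*p" is not transparent on the outgoing
   edges, because the call may store through an aliasing pointer, so
   its bit in TRANSPOUT is cleared below; a reference to a constant
   pool entry, which no call can modify, keeps its bit.  */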
4638 static void
4639 compute_transpout (void)
4641 basic_block bb;
4642 unsigned int i;
4643 struct expr *expr;
4645 sbitmap_vector_ones (transpout, last_basic_block);
4647 FOR_EACH_BB (bb)
4649 /* Note that flow inserted a nop at the end of basic blocks that
4650 end in call instructions for reasons other than abnormal
4651 control flow. */
4652 if (! CALL_P (BB_END (bb)))
4653 continue;
4655 for (i = 0; i < expr_hash_table.size; i++)
4656 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4657 if (MEM_P (expr->expr))
4659 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4660 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4661 continue;
4663 /* ??? Optimally, we would use interprocedural alias
4664 analysis to determine if this mem is actually killed
4665 by this call. */
4666 RESET_BIT (transpout[bb->index], expr->bitmap_index);
4671 /* Code Hoisting variables and subroutines. */
4673 /* Very busy expressions. */
4674 static sbitmap *hoist_vbein;
4675 static sbitmap *hoist_vbeout;
4677 /* Hoistable expressions. */
4678 static sbitmap *hoist_exprs;
4680 /* ??? We could compute post dominators and run this algorithm in
4681 reverse to perform tail merging; doing so would probably be
4682 more effective than the tail merging code in jump.c.
4684 It's unclear if tail merging could be run in parallel with
4685 code hoisting. It would be nice. */
4687 /* Allocate vars used for code hoisting analysis. */
4689 static void
4690 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4692 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4693 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4694 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4696 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4697 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4698 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4699 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4702 /* Free vars used for code hoisting analysis. */
4704 static void
4705 free_code_hoist_mem (void)
4707 sbitmap_vector_free (antloc);
4708 sbitmap_vector_free (transp);
4709 sbitmap_vector_free (comp);
4711 sbitmap_vector_free (hoist_vbein);
4712 sbitmap_vector_free (hoist_vbeout);
4713 sbitmap_vector_free (hoist_exprs);
4714 sbitmap_vector_free (transpout);
4716 free_dominance_info (CDI_DOMINATORS);
4719 /* Compute the very busy expressions at entry/exit from each block.
4721 An expression is very busy if all paths from a given point
4722 compute the expression. */
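/* A hand-written sketch: in

     if (c)
       x = a + b;
     else
       y = a + b;

   "a + b" is very busy on exit from the block testing "c", because
   every path from that point computes it; such expressions are the
   candidates considered for hoisting into a dominating block.  */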
4724 static void
4725 compute_code_hoist_vbeinout (void)
4727 int changed, passes;
4728 basic_block bb;
4730 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4731 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4733 passes = 0;
4734 changed = 1;
4736 while (changed)
4738 changed = 0;
4740 /* We scan the blocks in the reverse order to speed up
4741 the convergence. */
4742 FOR_EACH_BB_REVERSE (bb)
4744 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
4745 hoist_vbeout[bb->index], transp[bb->index]);
4746 if (bb->next_bb != EXIT_BLOCK_PTR)
4747 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
4750 passes++;
4753 if (dump_file)
4754 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4757 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4759 static void
4760 compute_code_hoist_data (void)
4762 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4763 compute_transpout ();
4764 compute_code_hoist_vbeinout ();
4765 calculate_dominance_info (CDI_DOMINATORS);
4766 if (dump_file)
4767 fprintf (dump_file, "\n");
4770 /* Determine if the expression identified by EXPR_INDEX would
4771 reach BB unimpaired if it was placed at the end of EXPR_BB.
4773 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4774 to me that the expression must either be computed or transparent in
4775 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4776 would allow the expression to be hoisted out of loops, even if
4777 the expression wasn't a loop invariant.
4779 Contrast this to reachability for PRE where an expression is
4780 considered reachable if *any* path reaches instead of *all*
4781 paths. */
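/* A hand-written sketch: if some block on a path from EXPR_BB to BB
   contains

     a = g ();

   but does not itself compute "a + b", then "a + b" is neither
   computed nor transparent there, the walk below fails, and the hoist
   is rejected.  This stricter test is what keeps non-invariant
   expressions from being hoisted out of loops, as noted above.  */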
4783 static int
4784 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4786 edge pred;
4787 edge_iterator ei;
4788 int visited_allocated_locally = 0;
4791 if (visited == NULL)
4793 visited_allocated_locally = 1;
4794 visited = XCNEWVEC (char, last_basic_block);
4797 FOR_EACH_EDGE (pred, ei, bb->preds)
4799 basic_block pred_bb = pred->src;
4801 if (pred->src == ENTRY_BLOCK_PTR)
4802 break;
4803 else if (pred_bb == expr_bb)
4804 continue;
4805 else if (visited[pred_bb->index])
4806 continue;
4808 /* Does this predecessor generate this expression? */
4809 else if (TEST_BIT (comp[pred_bb->index], expr_index))
4810 break;
4811 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4812 break;
4814 /* Not killed. */
4815 else
4817 visited[pred_bb->index] = 1;
4818 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4819 pred_bb, visited))
4820 break;
4823 if (visited_allocated_locally)
4824 free (visited);
4826 return (pred == NULL);
4829 /* Actually perform code hoisting. */
4831 static void
4832 hoist_code (void)
4834 basic_block bb, dominated;
4835 VEC (basic_block, heap) *domby;
4836 unsigned int i,j;
4837 struct expr **index_map;
4838 struct expr *expr;
4840 sbitmap_vector_zero (hoist_exprs, last_basic_block);
4842 /* Compute a mapping from expression number (`bitmap_index') to
4843 hash table entry. */
4845 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4846 for (i = 0; i < expr_hash_table.size; i++)
4847 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4848 index_map[expr->bitmap_index] = expr;
4850 /* Walk over each basic block looking for potentially hoistable
4851 expressions; nothing gets hoisted from the entry block. */
4852 FOR_EACH_BB (bb)
4854 int found = 0;
4855 int insn_inserted_p;
4857 domby = get_dominated_by (CDI_DOMINATORS, bb);
4858 /* Examine each expression that is very busy at the exit of this
4859 block. These are the potentially hoistable expressions. */
4860 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4862 int hoistable = 0;
4864 if (TEST_BIT (hoist_vbeout[bb->index], i)
4865 && TEST_BIT (transpout[bb->index], i))
4867 /* We've found a potentially hoistable expression, now
4868 we look at every block BB dominates to see if it
4869 computes the expression. */
4870 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4872 /* Ignore self dominance. */
4873 if (bb == dominated)
4874 continue;
4875 /* We've found a dominated block, now see if it computes
4876 the busy expression and whether or not moving that
4877 expression to the "beginning" of that block is safe. */
4878 if (!TEST_BIT (antloc[dominated->index], i))
4879 continue;
4881 /* Note if the expression would reach the dominated block
4882 unimpaired if it was placed at the end of BB.
4884 Keep track of how many times this expression is hoistable
4885 from a dominated block into BB. */
4886 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4887 hoistable++;
4890 /* If we found more than one hoistable occurrence of this
4891 expression, then note it in the bitmap of expressions to
4892 hoist. It makes no sense to hoist things which are computed
4893 in only one BB, and doing so tends to pessimize register
4894 allocation. One could increase this value to try harder
4895 to avoid any possible code expansion due to register
4896 allocation issues; however experiments have shown that
4897 the vast majority of hoistable expressions are only movable
4898 from two successors, so raising this threshold is likely
4899 to nullify any benefit we get from code hoisting. */
4900 if (hoistable > 1)
4902 SET_BIT (hoist_exprs[bb->index], i);
4903 found = 1;
4907 /* If we found nothing to hoist, then quit now. */
4908 if (! found)
4910 VEC_free (basic_block, heap, domby);
4911 continue;
4914 /* Loop over all the hoistable expressions. */
4915 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4917 /* We want to insert the expression into BB only once, so
4918 note when we've inserted it. */
4919 insn_inserted_p = 0;
4921 /* These tests should be the same as the tests above. */
4922 if (TEST_BIT (hoist_exprs[bb->index], i))
4924 /* We've found a potentially hoistable expression, now
4925 we look at every block BB dominates to see if it
4926 computes the expression. */
4927 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4929 /* Ignore self dominance. */
4930 if (bb == dominated)
4931 continue;
4933 /* We've found a dominated block, now see if it computes
4934 the busy expression and whether or not moving that
4935 expression to the "beginning" of that block is safe. */
4936 if (!TEST_BIT (antloc[dominated->index], i))
4937 continue;
4939 /* The expression is computed in the dominated block and
4940 it would be safe to compute it at the start of the
4941 dominated block. Now we have to determine if the
4942 expression would reach the dominated block if it was
4943 placed at the end of BB. */
4944 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4946 struct expr *expr = index_map[i];
4947 struct occr *occr = expr->antic_occr;
4948 rtx insn;
4949 rtx set;
4951 /* Find the right occurrence of this expression. */
4952 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4953 occr = occr->next;
4955 gcc_assert (occr);
4956 insn = occr->insn;
4957 set = single_set (insn);
4958 gcc_assert (set);
4960 /* Create a pseudo-reg to store the result of reaching
4961 expressions into. Get the mode for the new pseudo
4962 from the mode of the original destination pseudo. */
4963 if (expr->reaching_reg == NULL)
4964 expr->reaching_reg
4965 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4967 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4968 delete_insn (insn);
4969 occr->deleted_p = 1;
4970 if (!insn_inserted_p)
4972 insert_insn_end_basic_block (index_map[i], bb, 0);
4973 insn_inserted_p = 1;
4979 VEC_free (basic_block, heap, domby);
4982 free (index_map);
4985 /* Top level routine to perform one code hoisting (aka unification) pass.
4987 Return nonzero if a change was made. */
4989 static int
4990 one_code_hoisting_pass (void)
4992 int changed = 0;
4994 alloc_hash_table (max_cuid, &expr_hash_table, 0);
4995 compute_hash_table (&expr_hash_table);
4996 if (dump_file)
4997 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
4999 if (expr_hash_table.n_elems > 0)
5001 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
5002 compute_code_hoist_data ();
5003 hoist_code ();
5004 free_code_hoist_mem ();
5007 free_hash_table (&expr_hash_table);
5009 return changed;
5012 /* Here we provide the things required to do store motion towards
5013 the exit. In order for this to be effective, gcse also needed to
5014 be taught how to move a load when it is killed only by a store to itself.
5016 int i;
5017 float a[10];
5019 void foo(float scale)
5021 for (i=0; i<10; i++)
5022 a[i] *= scale;
5025 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5026 the load out since it's live around the loop, and stored at the bottom
5027 of the loop.
5029 The 'Load Motion' referred to and implemented in this file is
5030 an enhancement to gcse which when using edge based lcm, recognizes
5031 this situation and allows gcse to move the load out of the loop.
5033 Once gcse has hoisted the load, store motion can then push this
5034 load towards the exit, and we end up with no loads or stores of 'i'
5035 in the loop. */
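/* Roughly, after load motion and then store motion the loop above
   becomes (a sketch of the net effect, not literal compiler output):

     void
     foo (float scale)
     {
       int t;
       for (t = 0; t < 10; t++)
         a[t] *= scale;
       i = t;
     }

   with "t" standing for the reaching register, and the single store
   to "i" pushed towards the exit.  */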
5037 static hashval_t
5038 pre_ldst_expr_hash (const void *p)
5040 int do_not_record_p = 0;
5041 const struct ls_expr *x = p;
5042 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
5045 static int
5046 pre_ldst_expr_eq (const void *p1, const void *p2)
5048 const struct ls_expr *ptr1 = p1, *ptr2 = p2;
5049 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
5052 /* This will search the ldst list for a matching expression. If it
5053 doesn't find one, we create one and initialize it. */
5055 static struct ls_expr *
5056 ldst_entry (rtx x)
5058 int do_not_record_p = 0;
5059 struct ls_expr * ptr;
5060 unsigned int hash;
5061 void **slot;
5062 struct ls_expr e;
5064 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
5065 NULL, /*have_reg_qty=*/false);
5067 e.pattern = x;
5068 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
5069 if (*slot)
5070 return (struct ls_expr *)*slot;
5072 ptr = XNEW (struct ls_expr);
5074 ptr->next = pre_ldst_mems;
5075 ptr->expr = NULL;
5076 ptr->pattern = x;
5077 ptr->pattern_regs = NULL_RTX;
5078 ptr->loads = NULL_RTX;
5079 ptr->stores = NULL_RTX;
5080 ptr->reaching_reg = NULL_RTX;
5081 ptr->invalid = 0;
5082 ptr->index = 0;
5083 ptr->hash_index = hash;
5084 pre_ldst_mems = ptr;
5085 *slot = ptr;
5087 return ptr;
5090 /* Free up an individual ldst entry. */
5092 static void
5093 free_ldst_entry (struct ls_expr * ptr)
5095 free_INSN_LIST_list (& ptr->loads);
5096 free_INSN_LIST_list (& ptr->stores);
5098 free (ptr);
5101 /* Free up all memory associated with the ldst list. */
5103 static void
5104 free_ldst_mems (void)
5106 if (pre_ldst_table)
5107 htab_delete (pre_ldst_table);
5108 pre_ldst_table = NULL;
5110 while (pre_ldst_mems)
5112 struct ls_expr * tmp = pre_ldst_mems;
5114 pre_ldst_mems = pre_ldst_mems->next;
5116 free_ldst_entry (tmp);
5119 pre_ldst_mems = NULL;
5122 /* Dump debugging info about the ldst list. */
5124 static void
5125 print_ldst_list (FILE * file)
5127 struct ls_expr * ptr;
5129 fprintf (file, "LDST list: \n");
5131 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5133 fprintf (file, " Pattern (%3d): ", ptr->index);
5135 print_rtl (file, ptr->pattern);
5137 fprintf (file, "\n Loads : ");
5139 if (ptr->loads)
5140 print_rtl (file, ptr->loads);
5141 else
5142 fprintf (file, "(nil)");
5144 fprintf (file, "\n Stores : ");
5146 if (ptr->stores)
5147 print_rtl (file, ptr->stores);
5148 else
5149 fprintf (file, "(nil)");
5151 fprintf (file, "\n\n");
5154 fprintf (file, "\n");
5157 /* Return the entry for X in the list of ldst only expressions, or NULL if there is no valid entry. */
5159 static struct ls_expr *
5160 find_rtx_in_ldst (rtx x)
5162 struct ls_expr e;
5163 void **slot;
5164 if (!pre_ldst_table)
5165 return NULL;
5166 e.pattern = x;
5167 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
5168 if (!slot || ((struct ls_expr *)*slot)->invalid)
5169 return NULL;
5170 return *slot;
5173 /* Assign each element of the list of mems a monotonically increasing value. */
5175 static int
5176 enumerate_ldsts (void)
5178 struct ls_expr * ptr;
5179 int n = 0;
5181 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5182 ptr->index = n++;
5184 return n;
5187 /* Return first item in the list. */
5189 static inline struct ls_expr *
5190 first_ls_expr (void)
5192 return pre_ldst_mems;
5195 /* Return the next item in the list after the specified one. */
5197 static inline struct ls_expr *
5198 next_ls_expr (struct ls_expr * ptr)
5200 return ptr->next;
5203 /* Load Motion for loads which only kill themselves. */
5205 /* Return true if x is a simple MEM operation, with no registers or
5206 side effects. These are the types of loads we consider for the
5207 ld_motion list, otherwise we let the usual aliasing take care of it. */
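/* E.g. (a hand-written sketch): given "static int a[10];", a load
   from "a[2]" is simple in this sense: a fixed symbolic address with
   no side effects.  A load through "*p++" has a side effect, and one
   whose address mentions the stack pointer is also excluded, so both
   are left to the usual aliasing machinery.  */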
5209 static int
5210 simple_mem (rtx x)
5212 if (! MEM_P (x))
5213 return 0;
5215 if (MEM_VOLATILE_P (x))
5216 return 0;
5218 if (GET_MODE (x) == BLKmode)
5219 return 0;
5221 /* If we are handling exceptions, we must be careful with memory references
5222 that may trap. If we are not, the behavior is undefined, so we may just
5223 continue. */
5224 if (flag_non_call_exceptions && may_trap_p (x))
5225 return 0;
5227 if (side_effects_p (x))
5228 return 0;
5230 /* Do not consider function arguments passed on stack. */
5231 if (reg_mentioned_p (stack_pointer_rtx, x))
5232 return 0;
5234 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
5235 return 0;
5237 return 1;
5240 /* Make sure there isn't a buried reference in this pattern anywhere.
5241 If there is, invalidate the entry for it since we're not capable
5242 of fixing it up just yet. We have to be sure we know about ALL
5243 loads, since the aliasing code will treat each entry in the
5244 ld_motion list as aliasing nothing but itself. If we miss a load, we will get
5245 the wrong value since gcse might common it and we won't know to
5246 fix it up. */
5248 static void
5249 invalidate_any_buried_refs (rtx x)
5251 const char * fmt;
5252 int i, j;
5253 struct ls_expr * ptr;
5255 /* Invalidate it in the list. */
5256 if (MEM_P (x) && simple_mem (x))
5258 ptr = ldst_entry (x);
5259 ptr->invalid = 1;
5262 /* Recursively process the insn. */
5263 fmt = GET_RTX_FORMAT (GET_CODE (x));
5265 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5267 if (fmt[i] == 'e')
5268 invalidate_any_buried_refs (XEXP (x, i));
5269 else if (fmt[i] == 'E')
5270 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5271 invalidate_any_buried_refs (XVECEXP (x, i, j));
5275 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
5276 being defined as MEM loads and stores to symbols, with no side effects
5277 and no registers in the expression. For a MEM destination, we also
5278 check that the insn is still valid if we replace the destination with a
5279 REG, as is done in update_ld_motion_stores. If there are any uses/defs
5280 which don't match this criteria, they are invalidated and trimmed out
5281 later. */
5283 static void
5284 compute_ld_motion_mems (void)
5286 struct ls_expr * ptr;
5287 basic_block bb;
5288 rtx insn;
5290 pre_ldst_mems = NULL;
5291 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5292 pre_ldst_expr_eq, NULL);
5294 FOR_EACH_BB (bb)
5296 FOR_BB_INSNS (bb, insn)
5298 if (INSN_P (insn))
5300 if (GET_CODE (PATTERN (insn)) == SET)
5302 rtx src = SET_SRC (PATTERN (insn));
5303 rtx dest = SET_DEST (PATTERN (insn));
5305 /* Check for a simple LOAD... */
5306 if (MEM_P (src) && simple_mem (src))
5308 ptr = ldst_entry (src);
5309 if (REG_P (dest))
5310 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
5311 else
5312 ptr->invalid = 1;
5314 else
5316 /* Make sure there isn't a buried load somewhere. */
5317 invalidate_any_buried_refs (src);
5320 /* Check for stores. Don't worry about aliased ones, they
5321 will block any movement we might do later. We only care
5322 about this exact pattern since those are the only
5323 circumstances in which we will ignore the aliasing info. */
5324 if (MEM_P (dest) && simple_mem (dest))
5326 ptr = ldst_entry (dest);
5328 if (! MEM_P (src)
5329 && GET_CODE (src) != ASM_OPERANDS
5330 /* Check for REG manually since want_to_gcse_p
5331 returns 0 for all REGs. */
5332 && can_assign_to_reg_p (src))
5333 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
5334 else
5335 ptr->invalid = 1;
5338 else
5339 invalidate_any_buried_refs (PATTERN (insn));
5345 /* Remove any references that have been either invalidated or are not in the
5346 expression list for pre gcse. */
5348 static void
5349 trim_ld_motion_mems (void)
5351 struct ls_expr * * last = & pre_ldst_mems;
5352 struct ls_expr * ptr = pre_ldst_mems;
5354 while (ptr != NULL)
5356 struct expr * expr;
5358 /* Delete if entry has been made invalid. */
5359 if (! ptr->invalid)
5361 /* Delete if we cannot find this mem in the expression list. */
5362 unsigned int hash = ptr->hash_index % expr_hash_table.size;
5364 for (expr = expr_hash_table.table[hash];
5365 expr != NULL;
5366 expr = expr->next_same_hash)
5367 if (expr_equiv_p (expr->expr, ptr->pattern))
5368 break;
5370 else
5371 expr = (struct expr *) 0;
5373 if (expr)
5375 /* Set the expression field if we are keeping it. */
5376 ptr->expr = expr;
5377 last = & ptr->next;
5378 ptr = ptr->next;
5380 else
5382 *last = ptr->next;
5383 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5384 free_ldst_entry (ptr);
5385 ptr = * last;
5389 /* Show the world what we've found. */
5390 if (dump_file && pre_ldst_mems != NULL)
5391 print_ldst_list (dump_file);
5394 /* This routine will take an expression which we are replacing with
5395 a reaching register, and update any stores that are needed if
5396 that expression is in the ld_motion list. Stores are updated by
5397 copying their SRC to the reaching register, and then storing
5398 the reaching register into the store location. This keeps the
5399 correct value in the reaching register for the loads. */
5401 static void
5402 update_ld_motion_stores (struct expr * expr)
5404 struct ls_expr * mem_ptr;
5406 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
5408 /* We can try to find just the REACHED stores, but it shouldn't
5409 matter if we set the reaching reg everywhere... some might be
5410 dead and should be eliminated later. */
5412 /* We replace (set mem expr) with (set reg expr) (set mem reg)
5413 where reg is the reaching reg used in the load. We checked in
5414 compute_ld_motion_mems that we can replace (set mem expr) with
5415 (set reg expr) in that insn. */
5416 rtx list = mem_ptr->stores;
5418 for ( ; list != NULL_RTX; list = XEXP (list, 1))
5420 rtx insn = XEXP (list, 0);
5421 rtx pat = PATTERN (insn);
5422 rtx src = SET_SRC (pat);
5423 rtx reg = expr->reaching_reg;
5424 rtx copy, new;
5426 /* If we've already copied it, continue. */
5427 if (expr->reaching_reg == src)
5428 continue;
5430 if (dump_file)
5432 fprintf (dump_file, "PRE: store updated with reaching reg ");
5433 print_rtl (dump_file, expr->reaching_reg);
5434 fprintf (dump_file, ":\n ");
5435 print_inline_rtx (dump_file, insn, 8);
5436 fprintf (dump_file, "\n");
5439 copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
5440 new = emit_insn_before (copy, insn);
5441 record_one_set (REGNO (reg), new);
5442 SET_SRC (pat) = reg;
5443 df_insn_rescan (insn);
5445 /* Un-recognize this pattern since it's probably different now. */
5446 INSN_CODE (insn) = -1;
5447 gcse_create_count++;
5452 /* Store motion code. */
5454 #define ANTIC_STORE_LIST(x) ((x)->loads)
5455 #define AVAIL_STORE_LIST(x) ((x)->stores)
5456 #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg)
5458 /* This is used to communicate the target bitvector we want to use in the
5459 reg_set_info routine when called via the note_stores mechanism. */
5460 static int * regvec;
5462 /* And current insn, for the same routine. */
5463 static rtx compute_store_table_current_insn;
5465 /* Used in computing the reverse edge graph bit vectors. */
5466 static sbitmap * st_antloc;
5468 /* Global holding the number of store expressions we are dealing with. */
5469 static int num_stores;
5471 /* Check whether we need to mark a register set. Called from
5472 note_stores. */
5474 static void
5475 reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5476 void *data)
5478 sbitmap bb_reg = data;
5480 if (GET_CODE (dest) == SUBREG)
5481 dest = SUBREG_REG (dest);
5483 if (REG_P (dest))
5485 regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
5486 if (bb_reg)
5487 SET_BIT (bb_reg, REGNO (dest));
5491 /* Clear any mark that says that this insn sets dest. Called from
5492 note_stores. */
5494 static void
5495 reg_clear_last_set (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5496 void *data)
5498 int *dead_vec = data;
5500 if (GET_CODE (dest) == SUBREG)
5501 dest = SUBREG_REG (dest);
5503 if (REG_P (dest) &&
5504 dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn))
5505 dead_vec[REGNO (dest)] = 0;
5508 /* Return zero if some of the registers in list X are killed
5509 due to the set of registers marked in REGS_SET. */
5511 static bool
5512 store_ops_ok (rtx x, int *regs_set)
5514 rtx reg;
5516 for (; x; x = XEXP (x, 1))
5518 reg = XEXP (x, 0);
5519 if (regs_set[REGNO(reg)])
5520 return false;
5523 return true;
5526 /* Returns a list of registers mentioned in X. */
5527 static rtx
5528 extract_mentioned_regs (rtx x)
5530 return extract_mentioned_regs_helper (x, NULL_RTX);
5533 /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used
5534 registers. */
5535 static rtx
5536 extract_mentioned_regs_helper (rtx x, rtx accum)
5538 int i;
5539 enum rtx_code code;
5540 const char * fmt;
5542 /* Repeat is used to turn tail-recursion into iteration. */
5543 repeat:
5545 if (x == 0)
5546 return accum;
5548 code = GET_CODE (x);
5549 switch (code)
5551 case REG:
5552 return alloc_EXPR_LIST (0, x, accum);
5554 case MEM:
5555 x = XEXP (x, 0);
5556 goto repeat;
5558 case PRE_DEC:
5559 case PRE_INC:
5560 case PRE_MODIFY:
5561 case POST_DEC:
5562 case POST_INC:
5563 case POST_MODIFY:
5564 /* We do not run this function with arguments having side effects. */
5565 gcc_unreachable ();
5567 case PC:
5568 case CC0: /*FIXME*/
5569 case CONST:
5570 case CONST_INT:
5571 case CONST_DOUBLE:
5572 case CONST_VECTOR:
5573 case SYMBOL_REF:
5574 case LABEL_REF:
5575 case ADDR_VEC:
5576 case ADDR_DIFF_VEC:
5577 return accum;
5579 default:
5580 break;
5583 i = GET_RTX_LENGTH (code) - 1;
5584 fmt = GET_RTX_FORMAT (code);
5586 for (; i >= 0; i--)
5588 if (fmt[i] == 'e')
5590 rtx tem = XEXP (x, i);
5592 /* If we are about to do the last recursive call
5593 needed at this level, change it into iteration. */
5594 if (i == 0)
5596 x = tem;
5597 goto repeat;
5600 accum = extract_mentioned_regs_helper (tem, accum);
5602 else if (fmt[i] == 'E')
5604 int j;
5606 for (j = 0; j < XVECLEN (x, i); j++)
5607 accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
5611 return accum;
5614 /* Determine whether INSN is a MEM store pattern that we will consider moving.
5615 REGS_SET_BEFORE is bitmap of registers set before (and including) the
5616 current insn, REGS_SET_AFTER is bitmap of registers set after (and
5617 including) the insn in this basic block. We must be passing through BB from
5618 head to end, as we are using this fact to speed things up.
5620 The results are stored this way:
5622 -- the first anticipatable expression is added into ANTIC_STORE_LIST
5623 -- if the processed expression is not anticipatable, NULL_RTX is added
5624 there instead, so that we can use it as an indicator that no further
5625 expression of this type may be anticipatable
5626 -- if the expression is available, it is added as head of AVAIL_STORE_LIST;
5627 consequently, all of them but this head are dead and may be deleted.
5628 -- if the expression is not available, the insn that causes it not to be
5629 available is stored in reaching_reg.
5631 Things are complicated a bit by the fact that there may already be stores
5632 to the same MEM from other blocks; also the caller must take care of the
5633 necessary cleanup of the temporary markers after the end of the basic block. */
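/* A hand-written sketch of the AVAIL_STORE_LIST rule above: in a block

     *p = x;
     ...
     *p = y;

   where neither store is killed before the end of the block, the
   later store ends up as the head of AVAIL_STORE_LIST and the earlier
   one is dead, exactly as described above.  */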
5636 static void
5637 find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
5639 struct ls_expr * ptr;
5640 rtx dest, set, tmp;
5641 int check_anticipatable, check_available;
5642 basic_block bb = BLOCK_FOR_INSN (insn);
5644 set = single_set (insn);
5645 if (!set)
5646 return;
5648 dest = SET_DEST (set);
5650 if (! MEM_P (dest) || MEM_VOLATILE_P (dest)
5651 || GET_MODE (dest) == BLKmode)
5652 return;
5654 if (side_effects_p (dest))
5655 return;
5657 /* If we are handling exceptions, we must be careful with memory references
5658 that may trap. If we are not, the behavior is undefined, so we may just
5659 continue. */
5660 if (flag_non_call_exceptions && may_trap_p (dest))
5661 return;
5663 /* Even if the destination cannot trap, the source may. In this case we'd
5664 need to handle updating the REG_EH_REGION note. */
5665 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
5666 return;
5668 /* Make sure that the SET_SRC of this store insns can be assigned to
5669 a register, or we will fail later on in replace_store_insn, which
5670 assumes that we can do this. But sometimes the target machine has
5671 oddities like a MEM read-modify-write instruction. See for example
5672 PR24257. */
5673 if (!can_assign_to_reg_p (SET_SRC (set)))
5674 return;
5676 ptr = ldst_entry (dest);
5677 if (!ptr->pattern_regs)
5678 ptr->pattern_regs = extract_mentioned_regs (dest);
5680 /* Do not check for anticipatability if we either found one anticipatable
5681 store already, or tested for one and found out that it was killed. */
5682 check_anticipatable = 0;
5683 if (!ANTIC_STORE_LIST (ptr))
5684 check_anticipatable = 1;
5685 else
5687 tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
5688 if (tmp != NULL_RTX
5689 && BLOCK_FOR_INSN (tmp) != bb)
5690 check_anticipatable = 1;
5692 if (check_anticipatable)
5694 if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
5695 tmp = NULL_RTX;
5696 else
5697 tmp = insn;
5698 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
5699 ANTIC_STORE_LIST (ptr));
5702 /* It is not necessary to check whether the store is available if we did
5703 it successfully before; if we failed before, do not bother to check
5704 until we reach the insn that caused us to fail. */
5705 check_available = 0;
5706 if (!AVAIL_STORE_LIST (ptr))
5707 check_available = 1;
5708 else
5710 tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
5711 if (BLOCK_FOR_INSN (tmp) != bb)
5712 check_available = 1;
5714 if (check_available)
5716 /* Check whether we have already reached the insn at which the check
5717 failed last time. */
5718 if (LAST_AVAIL_CHECK_FAILURE (ptr))
5720 for (tmp = BB_END (bb);
5721 tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
5722 tmp = PREV_INSN (tmp))
5723 continue;
5724 if (tmp == insn)
5725 check_available = 0;
5727 else
5728 check_available = store_killed_after (dest, ptr->pattern_regs, insn,
5729 bb, regs_set_after,
5730 &LAST_AVAIL_CHECK_FAILURE (ptr));
5732 if (!check_available)
5733 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
5736 /* Find available and anticipatable stores. */
5738 static int
5739 compute_store_table (void)
5741 int ret;
5742 basic_block bb;
5743 unsigned regno;
5744 rtx insn, pat, tmp;
5745 int *last_set_in, *already_set;
5746 struct ls_expr * ptr, **prev_next_ptr_ptr;
5748 max_gcse_regno = max_reg_num ();
5750 reg_set_in_block = sbitmap_vector_alloc (last_basic_block,
5751 max_gcse_regno);
5752 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
5753 pre_ldst_mems = 0;
5754 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5755 pre_ldst_expr_eq, NULL);
5756 last_set_in = XCNEWVEC (int, max_gcse_regno);
5757 already_set = XNEWVEC (int, max_gcse_regno);
5759 /* Find all the stores we care about. */
5760 FOR_EACH_BB (bb)
5762 /* First compute the registers set in this block. */
5763 regvec = last_set_in;
5765 FOR_BB_INSNS (bb, insn)
5767 if (! INSN_P (insn))
5768 continue;
5770 if (CALL_P (insn))
5772 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5773 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5775 last_set_in[regno] = INSN_UID (insn);
5776 SET_BIT (reg_set_in_block[bb->index], regno);
5780 pat = PATTERN (insn);
5781 compute_store_table_current_insn = insn;
5782 note_stores (pat, reg_set_info, reg_set_in_block[bb->index]);
5785 /* Now find the stores. */
5786 memset (already_set, 0, sizeof (int) * max_gcse_regno);
5787 regvec = already_set;
5788 FOR_BB_INSNS (bb, insn)
5790 if (! INSN_P (insn))
5791 continue;
5793 if (CALL_P (insn))
5795 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5796 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5797 already_set[regno] = 1;
5800 pat = PATTERN (insn);
5801 note_stores (pat, reg_set_info, NULL);
5803 /* Now that we've marked regs, look for stores. */
5804 find_moveable_store (insn, already_set, last_set_in);
5806 /* Unmark regs that are no longer set. */
5807 compute_store_table_current_insn = insn;
5808 note_stores (pat, reg_clear_last_set, last_set_in);
5809 if (CALL_P (insn))
5811 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5812 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
5813 && last_set_in[regno] == INSN_UID (insn))
5814 last_set_in[regno] = 0;
5818 #ifdef ENABLE_CHECKING
5819 /* last_set_in should now be all-zero. */
5820 for (regno = 0; regno < max_gcse_regno; regno++)
5821 gcc_assert (!last_set_in[regno]);
5822 #endif
5824 /* Clear temporary marks. */
5825 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5827 LAST_AVAIL_CHECK_FAILURE (ptr) = NULL_RTX;
5828 if (ANTIC_STORE_LIST (ptr)
5829 && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
5830 ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
5834 /* Remove the stores that are not available anywhere, as there will
5835 be no opportunity to optimize them. */
5836 for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
5837 ptr != NULL;
5838 ptr = *prev_next_ptr_ptr)
5840 if (!AVAIL_STORE_LIST (ptr))
5842 *prev_next_ptr_ptr = ptr->next;
5843 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5844 free_ldst_entry (ptr);
5846 else
5847 prev_next_ptr_ptr = &ptr->next;
5850 ret = enumerate_ldsts ();
5852 if (dump_file)
5854 fprintf (dump_file, "ST_avail and ST_antic (shown under loads..)\n");
5855 print_ldst_list (dump_file);
5858 free (last_set_in);
5859 free (already_set);
5860 return ret;
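/* In rough outline (a sketch, not a normative description): the first
   scan above only records, in LAST_SET_IN, the UID of the last insn in
   the block that sets each register.  The second scan then maintains
   ALREADY_SET (registers set before or in the current insn) and trims
   LAST_SET_IN back to registers still set in or after it, which is
   exactly the pair of bitmaps find_moveable_store needs for its cheap
   anticipatability and availability tests.  */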
5863 /* Check to see if the load X is aliased with STORE_PATTERN.
5864 AFTER is true if we are checking the case when STORE_PATTERN occurs
5865 after X. */
5867 static bool
5868 load_kills_store (rtx x, rtx store_pattern, int after)
5870 if (after)
5871 return anti_dependence (x, store_pattern);
5872 else
5873 return true_dependence (store_pattern, GET_MODE (store_pattern), x,
5874 rtx_addr_varies_p);
5877 /* Go through the entire insn X, looking for any loads which might alias
5878 STORE_PATTERN. Return true if found.
5879 AFTER is true if we are checking the case when STORE_PATTERN occurs
5880 after the insn X. */
5882 static bool
5883 find_loads (rtx x, rtx store_pattern, int after)
5885 const char * fmt;
5886 int i, j;
5887 int ret = false;
5889 if (!x)
5890 return false;
5892 if (GET_CODE (x) == SET)
5893 x = SET_SRC (x);
5895 if (MEM_P (x))
5897 if (load_kills_store (x, store_pattern, after))
5898 return true;
5901 /* Recursively process the insn. */
5902 fmt = GET_RTX_FORMAT (GET_CODE (x));
5904 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
5906 if (fmt[i] == 'e')
5907 ret |= find_loads (XEXP (x, i), store_pattern, after);
5908 else if (fmt[i] == 'E')
5909 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5910 ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
5912 return ret;
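/* Note (an informal observation): for a SET the walk above descends
   only into SET_SRC -- the SET_DEST write is a store, not a load, and
   aliasing against other stores is checked separately in
   store_killed_in_pat.  */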
5915 static inline bool
5916 store_killed_in_pat (rtx x, rtx pat, int after)
5918 if (GET_CODE (pat) == SET)
5920 rtx dest = SET_DEST (pat);
5922 if (GET_CODE (dest) == ZERO_EXTRACT)
5923 dest = XEXP (dest, 0);
5925 /* Check for memory stores to aliased objects. */
5926 if (MEM_P (dest)
5927 && !expr_equiv_p (dest, x))
5929 if (after)
5931 if (output_dependence (dest, x))
5932 return true;
5934 else
5936 if (output_dependence (x, dest))
5937 return true;
5942 if (find_loads (pat, x, after))
5943 return true;
5945 return false;
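/* A sketch of why the dependence direction above matters: if the store
   X occurs after the insn containing DEST, then DEST is the earlier
   write and we ask output_dependence (dest, x); otherwise X is the
   earlier write and the arguments are swapped.  Either way a positive
   answer means the insn kills the store.  */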
5948 /* Check if INSN kills the store pattern X (is aliased with it).
5949 AFTER is true if we are checking the case when store X occurs
5950 after the insn. Return true if it does. */
5952 static bool
5953 store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
5955 rtx reg, base, note, pat;
5957 if (!INSN_P (insn))
5958 return false;
5960 if (CALL_P (insn))
5962 /* A normal or pure call might read from the pattern,
5963 but a const call will not. */
5964 if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
5965 return true;
5967 /* But even a const call reads its parameters. Check whether the
5968 base of some of the registers used in the mem is the stack pointer. */
5969 for (reg = x_regs; reg; reg = XEXP (reg, 1))
5971 base = find_base_term (XEXP (reg, 0));
5972 if (!base
5973 || (GET_CODE (base) == ADDRESS
5974 && GET_MODE (base) == Pmode
5975 && XEXP (base, 0) == stack_pointer_rtx))
5976 return true;
5979 return false;
5982 pat = PATTERN (insn);
5983 if (GET_CODE (pat) == SET)
5985 if (store_killed_in_pat (x, pat, after))
5986 return true;
5988 else if (GET_CODE (pat) == PARALLEL)
5990 int i;
5992 for (i = 0; i < XVECLEN (pat, 0); i++)
5993 if (store_killed_in_pat (x, XVECEXP (pat, 0, i), after))
5994 return true;
5996 else if (find_loads (PATTERN (insn), x, after))
5997 return true;
5999 /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
6000 location aliased with X, then this insn kills X. */
6001 note = find_reg_equal_equiv_note (insn);
6002 if (! note)
6003 return false;
6004 note = XEXP (note, 0);
6006 /* However, if the note represents a must alias rather than a may
6007 alias relationship, then it does not kill X. */
6008 if (expr_equiv_p (note, x))
6009 return false;
6011 /* See if there are any aliased loads in the note. */
6012 return find_loads (note, x, after);
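/* An illustrative example (the insn and register numbers are
   hypothetical): an insn carrying a note such as

     (insn ... (set (reg:SI 101) ...)
        (expr_list:REG_EQUAL (mem:SI (reg:SI 102)) ...))

   may later have the noted MEM substituted back into the stream, so a
   may-alias between that MEM and X must count as a kill; only an exact
   match (a must alias) is safe to ignore.  */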
6015 /* Returns true if the expression X is loaded or clobbered on or after INSN
6016 within basic block BB. REGS_SET_AFTER is a bitmap of registers set in
6017 or after the insn. X_REGS is the list of registers mentioned in X. If the
6018 store is killed, return in FAIL_INSN the last insn in which that happens. */
6020 static bool
6021 store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb,
6022 int *regs_set_after, rtx *fail_insn)
6024 rtx last = BB_END (bb), act;
6026 if (!store_ops_ok (x_regs, regs_set_after))
6028 /* We do not know where it will happen. */
6029 if (fail_insn)
6030 *fail_insn = NULL_RTX;
6031 return true;
6034 /* Scan from the end, so that fail_insn is determined correctly. */
6035 for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
6036 if (store_killed_in_insn (x, x_regs, act, false))
6038 if (fail_insn)
6039 *fail_insn = act;
6040 return true;
6043 return false;
6046 /* Returns true if the expression X is loaded or clobbered on or before INSN
6047 within basic block BB. X_REGS is the list of registers mentioned in X.
6048 REGS_SET_BEFORE is a bitmap of registers set before or in this insn. */
6049 static bool
6050 store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb,
6051 int *regs_set_before)
6053 rtx first = BB_HEAD (bb);
6055 if (!store_ops_ok (x_regs, regs_set_before))
6056 return true;
6058 for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
6059 if (store_killed_in_insn (x, x_regs, insn, true))
6060 return true;
6062 return false;
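/* Informally, the two routines above are mirror images:
   store_killed_before scans backward from INSN to the block head with
   AFTER true (the store would sit after each scanned insn), while
   store_killed_after scans from the block end down to INSN with AFTER
   false and remembers the latest killing insn, so a later availability
   query can resume from it instead of rescanning the block.  */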
6065 /* Fill in available, anticipatable, transparent and kill vectors in
6066 STORE_DATA, based on lists of available and anticipatable stores. */
6067 static void
6068 build_store_vectors (void)
6070 basic_block bb;
6071 int *regs_set_in_block;
6072 rtx insn, st;
6073 struct ls_expr * ptr;
6074 unsigned regno;
6076 /* Build the gen_vector. This is any store in the table which is not killed
6077 by aliasing later in its block. */
6078 ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
6079 sbitmap_vector_zero (ae_gen, last_basic_block);
6081 st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
6082 sbitmap_vector_zero (st_antloc, last_basic_block);
6084 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6086 for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6088 insn = XEXP (st, 0);
6089 bb = BLOCK_FOR_INSN (insn);
6091 /* If we've already seen an available expression in this block,
6092 we can delete this one (it occurs earlier in the block). We'll
6093 copy the SRC expression to an unused register in case there
6094 are any side effects. */
6095 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6097 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6098 if (dump_file)
6099 fprintf (dump_file, "Removing redundant store:\n");
6100 replace_store_insn (r, XEXP (st, 0), bb, ptr);
6101 continue;
6103 SET_BIT (ae_gen[bb->index], ptr->index);
6106 for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6108 insn = XEXP (st, 0);
6109 bb = BLOCK_FOR_INSN (insn);
6110 SET_BIT (st_antloc[bb->index], ptr->index);
6114 ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
6115 sbitmap_vector_zero (ae_kill, last_basic_block);
6117 transp = sbitmap_vector_alloc (last_basic_block, num_stores);
6118 sbitmap_vector_zero (transp, last_basic_block);
6119 regs_set_in_block = XNEWVEC (int, max_gcse_regno);
6121 FOR_EACH_BB (bb)
6123 for (regno = 0; regno < max_gcse_regno; regno++)
6124 regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index], regno);
6126 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6128 if (store_killed_after (ptr->pattern, ptr->pattern_regs, BB_HEAD (bb),
6129 bb, regs_set_in_block, NULL))
6131 /* It should not be necessary to consider the expression
6132 killed if it is both anticipatable and available. */
6133 if (!TEST_BIT (st_antloc[bb->index], ptr->index)
6134 || !TEST_BIT (ae_gen[bb->index], ptr->index))
6135 SET_BIT (ae_kill[bb->index], ptr->index);
6137 else
6138 SET_BIT (transp[bb->index], ptr->index);
6142 free (regs_set_in_block);
6144 if (dump_file)
6146 dump_sbitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
6147 dump_sbitmap_vector (dump_file, "st_kill", "", ae_kill, last_basic_block);
6148 dump_sbitmap_vector (dump_file, "Transpt", "", transp, last_basic_block);
6149 dump_sbitmap_vector (dump_file, "st_avloc", "", ae_gen, last_basic_block);
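/* A rough sketch of the dataflow solved on these vectors (after the
   lazy code motion references cited at the top of the file): with
   ANTLOC = st_antloc, AVLOC = ae_gen, KILL = ae_kill and TRANSP as
   built above, pre_edge_rev_lcm iterates, per store expression,

     AVIN (bb)  = intersection of AVOUT (p) over all predecessors p
     AVOUT (bb) = AVLOC (bb) | (AVIN (bb) & ~KILL (bb))

   together with the mirrored anticipatability system on the reverse
   flowgraph, and derives pre_insert_map (edges that need a copy of the
   store) and pre_delete_map (blocks whose own store is redundant).  */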
6153 /* Insert an instruction at the beginning of a basic block, and update
6154 the BB_HEAD if needed. */
6156 static void
6157 insert_insn_start_basic_block (rtx insn, basic_block bb)
6159 /* Insert at start of successor block. */
6160 rtx prev = PREV_INSN (BB_HEAD (bb));
6161 rtx before = BB_HEAD (bb);
6162 while (before != 0)
6164 if (! LABEL_P (before)
6165 && !NOTE_INSN_BASIC_BLOCK_P (before))
6166 break;
6167 prev = before;
6168 if (prev == BB_END (bb))
6169 break;
6170 before = NEXT_INSN (before);
6173 insn = emit_insn_after_noloc (insn, prev, bb);
6175 if (dump_file)
6177 fprintf (dump_file, "STORE_MOTION insert store at start of BB %d:\n",
6178 bb->index);
6179 print_inline_rtx (dump_file, insn, 6);
6180 fprintf (dump_file, "\n");
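/* Note (informal): the loop above steps past the block's CODE_LABEL
   and NOTE_INSN_BASIC_BLOCK, so the store is emitted after them;
   emitting before the label would effectively place it outside the
   block.  */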
6184 /* This routine will insert a store on an edge. EXPR is the ldst entry for
6185 the memory reference, and E is the edge to insert it on. Returns nonzero
6186 if an edge insertion was performed. */
6188 static int
6189 insert_store (struct ls_expr * expr, edge e)
6191 rtx reg, insn;
6192 basic_block bb;
6193 edge tmp;
6194 edge_iterator ei;
6196 /* We did all the deletes before this insert, so if we didn't delete a
6197 store, then we haven't set the reaching reg yet either. */
6198 if (expr->reaching_reg == NULL_RTX)
6199 return 0;
6201 if (e->flags & EDGE_FAKE)
6202 return 0;
6204 reg = expr->reaching_reg;
6205 insn = gen_move_insn (copy_rtx (expr->pattern), reg);
6207 /* If we are inserting this expression on ALL predecessor edges of a BB,
6208 insert it at the start of the BB, and reset the insert bits on those
6209 edges so we don't try to insert it on them again. */
6210 bb = e->dest;
6211 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6212 if (!(tmp->flags & EDGE_FAKE))
6214 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6216 gcc_assert (index != EDGE_INDEX_NO_EDGE);
6217 if (! TEST_BIT (pre_insert_map[index], expr->index))
6218 break;
6221 /* If tmp is NULL, we found an insertion on every edge, blank the
6222 insertion vector for these edges, and insert at the start of the BB. */
6223 if (!tmp && bb != EXIT_BLOCK_PTR)
6225 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6227 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6228 RESET_BIT (pre_insert_map[index], expr->index);
6230 insert_insn_start_basic_block (insn, bb);
6231 return 0;
6234 /* We can't put stores in the front of blocks pointed to by abnormal
6235 edges since that may put a store where one didn't use to be. */
6236 gcc_assert (!(e->flags & EDGE_ABNORMAL));
6238 insert_insn_on_edge (insn, e);
6240 if (dump_file)
6242 fprintf (dump_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
6243 e->src->index, e->dest->index);
6244 print_inline_rtx (dump_file, insn, 6);
6245 fprintf (dump_file, "\n");
6248 return 1;
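/* A sketch of the two insertion strategies above: when LCM requests
   the store on every non-fake incoming edge of a block, the single
   copy is hoisted to the block head instead; otherwise the insn is
   queued with insert_insn_on_edge and materialized later by
   commit_edge_insertions, which splits the edge if necessary.  */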
6251 /* Remove, from insns reachable from the store in basic block BB, any
6252 REG_EQUAL or REG_EQUIV notes containing a reference to the memory location of SMEXPR.
6254 This could be rather expensive. */
6256 static void
6257 remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
6259 edge_iterator *stack, ei;
6260 int sp;
6261 edge act;
6262 sbitmap visited = sbitmap_alloc (last_basic_block);
6263 rtx last, insn, note;
6264 rtx mem = smexpr->pattern;
6266 stack = XNEWVEC (edge_iterator, n_basic_blocks);
6267 sp = 0;
6268 ei = ei_start (bb->succs);
6270 sbitmap_zero (visited);
6272 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
6273 while (1)
6275 if (!act)
6277 if (!sp)
6279 free (stack);
6280 sbitmap_free (visited);
6281 return;
6283 act = ei_edge (stack[--sp]);
6285 bb = act->dest;
6287 if (bb == EXIT_BLOCK_PTR
6288 || TEST_BIT (visited, bb->index))
6290 if (!ei_end_p (ei))
6291 ei_next (&ei);
6292 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6293 continue;
6295 SET_BIT (visited, bb->index);
6297 if (TEST_BIT (st_antloc[bb->index], smexpr->index))
6299 for (last = ANTIC_STORE_LIST (smexpr);
6300 BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
6301 last = XEXP (last, 1))
6302 continue;
6303 last = XEXP (last, 0);
6305 else
6306 last = NEXT_INSN (BB_END (bb));
6308 for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
6309 if (INSN_P (insn))
6311 note = find_reg_equal_equiv_note (insn);
6312 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6313 continue;
6315 if (dump_file)
6316 fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
6317 INSN_UID (insn));
6318 remove_note (insn, note);
6321 if (!ei_end_p (ei))
6322 ei_next (&ei);
6323 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6325 if (EDGE_COUNT (bb->succs) > 0)
6327 if (act)
6328 stack[sp++] = ei;
6329 ei = ei_start (bb->succs);
6330 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
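/* Informally, the loop above is an iterative depth-first search over
   the blocks reachable from BB, using an explicit stack of edge
   iterators.  Within a block it only scans up to the next
   anticipatable occurrence of the store, since past that insn the
   memory holds the stored value again and the notes become accurate
   once more.  */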
6335 /* This routine will replace a store with a SET to a specified register. */
6337 static void
6338 replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
6340 rtx insn, mem, note, set, ptr, pair;
6342 mem = smexpr->pattern;
6343 insn = gen_move_insn (reg, SET_SRC (single_set (del)));
6345 for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
6346 if (XEXP (ptr, 0) == del)
6348 XEXP (ptr, 0) = insn;
6349 break;
6352 /* Move the notes from the deleted insn to its replacement, and patch
6353 up the LIBCALL notes. */
6354 REG_NOTES (insn) = REG_NOTES (del);
6356 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
6357 if (note)
6359 pair = XEXP (note, 0);
6360 note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
6361 XEXP (note, 0) = insn;
6363 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
6364 if (note)
6366 pair = XEXP (note, 0);
6367 note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
6368 XEXP (note, 0) = insn;
6371 /* Emit the insn AFTER all the notes are transferred.
6372 This is cheaper since we avoid df rescanning for the note change. */
6373 insn = emit_insn_after (insn, del);
6375 if (dump_file)
6377 fprintf (dump_file,
6378 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
6379 print_inline_rtx (dump_file, del, 6);
6380 fprintf (dump_file, "\nSTORE MOTION replaced with insn:\n ");
6381 print_inline_rtx (dump_file, insn, 6);
6382 fprintf (dump_file, "\n");
6385 delete_insn (del);
6387 /* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
6388 they are no longer accurate if they are reached by this
6389 definition, so drop them. */
6390 for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn))
6391 if (INSN_P (insn))
6393 set = single_set (insn);
6394 if (!set)
6395 continue;
6396 if (expr_equiv_p (SET_DEST (set), mem))
6397 return;
6398 note = find_reg_equal_equiv_note (insn);
6399 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6400 continue;
6402 if (dump_file)
6403 fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
6404 INSN_UID (insn));
6405 remove_note (insn, note);
6407 remove_reachable_equiv_notes (bb, smexpr);
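/* An illustrative before/after picture (register numbers are
   hypothetical).  A deleted store

     (set (mem:SI (reg:SI 100)) (reg:SI 101))

   is rewritten in place into a copy to the expression's reaching
   register,

     (set (reg:SI 200) (reg:SI 101))

   and insert_store later materializes

     (set (mem:SI (reg:SI 100)) (reg:SI 200))

   on the edges or block heads chosen by LCM.  */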
6411 /* Delete a store, but copy the value that would have been stored into
6412 the reaching_reg for later storing. */
6414 static void
6415 delete_store (struct ls_expr * expr, basic_block bb)
6417 rtx reg, i, del;
6419 if (expr->reaching_reg == NULL_RTX)
6420 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
6422 reg = expr->reaching_reg;
6424 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
6426 del = XEXP (i, 0);
6427 if (BLOCK_FOR_INSN (del) == bb)
6429 /* We know there is only one since we deleted redundant
6430 ones during the available computation. */
6431 replace_store_insn (reg, del, bb, expr);
6432 break;
6437 /* Free memory used by store motion. */
6439 static void
6440 free_store_memory (void)
6442 free_ldst_mems ();
6444 if (ae_gen)
6445 sbitmap_vector_free (ae_gen);
6446 if (ae_kill)
6447 sbitmap_vector_free (ae_kill);
6448 if (transp)
6449 sbitmap_vector_free (transp);
6450 if (st_antloc)
6451 sbitmap_vector_free (st_antloc);
6452 if (pre_insert_map)
6453 sbitmap_vector_free (pre_insert_map);
6454 if (pre_delete_map)
6455 sbitmap_vector_free (pre_delete_map);
6456 if (reg_set_in_block)
6457 sbitmap_vector_free (reg_set_in_block);
6459 ae_gen = ae_kill = transp = st_antloc = NULL;
6460 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
6463 /* Perform store motion. Much like gcse, except we move expressions the
6464 other way by looking at the flowgraph in reverse. */
6466 static void
6467 store_motion (void)
6469 basic_block bb;
6470 int x;
6471 struct ls_expr * ptr;
6472 int update_flow = 0;
6474 if (dump_file)
6476 fprintf (dump_file, "before store motion\n");
6477 print_rtl (dump_file, get_insns ());
6480 init_alias_analysis ();
6482 /* Find all the available and anticipatable stores. */
6483 num_stores = compute_store_table ();
6484 if (num_stores == 0)
6486 htab_delete (pre_ldst_table);
6487 pre_ldst_table = NULL;
6488 sbitmap_vector_free (reg_set_in_block);
6489 end_alias_analysis ();
6490 return;
6493 /* Now compute kill & transp vectors. */
6494 build_store_vectors ();
6495 add_noreturn_fake_exit_edges ();
6496 connect_infinite_loops_to_exit ();
6498 edge_list = pre_edge_rev_lcm (num_stores, transp, ae_gen,
6499 st_antloc, ae_kill, &pre_insert_map,
6500 &pre_delete_map);
6502 /* Now we want to insert the new stores which are going to be needed. */
6503 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6505 /* If any of the edges we have above are abnormal, we can't move this
6506 store. */
6507 for (x = NUM_EDGES (edge_list) - 1; x >= 0; x--)
6508 if (TEST_BIT (pre_insert_map[x], ptr->index)
6509 && (INDEX_EDGE (edge_list, x)->flags & EDGE_ABNORMAL))
6510 break;
6512 if (x >= 0)
6514 if (dump_file != NULL)
6515 fprintf (dump_file,
6516 "Can't replace store %d: abnormal edge from %d to %d\n",
6517 ptr->index, INDEX_EDGE (edge_list, x)->src->index,
6518 INDEX_EDGE (edge_list, x)->dest->index);
6519 continue;
6522 /* Delete the stores this expression makes redundant, and insert the replacement stores chosen by LCM. */
6524 FOR_EACH_BB (bb)
6525 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
6526 delete_store (ptr, bb);
6528 for (x = 0; x < NUM_EDGES (edge_list); x++)
6529 if (TEST_BIT (pre_insert_map[x], ptr->index))
6530 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
6533 if (update_flow)
6534 commit_edge_insertions ();
6536 free_store_memory ();
6537 free_edge_list (edge_list);
6538 remove_fake_exit_edges ();
6539 end_alias_analysis ();
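/* In outline (an informal summary of the pass above): (1) build the
   store table, (2) compute the local antic/avail/kill/transp vectors,
   (3) run LCM on the reverse flowgraph, (4) delete the stores flagged
   in pre_delete_map, saving their value in reaching_reg, (5) insert
   the replacement stores on the edges flagged in pre_insert_map, and
   (6) commit the queued edge insertions.  */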
6543 /* Entry point for jump bypassing optimization pass. */
6545 static int
6546 bypass_jumps (void)
6548 int changed;
6550 /* We do not construct an accurate cfg in functions which call
6551 setjmp, so just punt to be safe. */
6552 if (current_function_calls_setjmp)
6553 return 0;
6555 /* Identify the basic block information for this function, including
6556 successors and predecessors. */
6557 max_gcse_regno = max_reg_num ();
6559 if (dump_file)
6560 dump_flow_info (dump_file, dump_flags);
6562 /* Return if there's nothing to do, or it is too expensive. */
6563 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
6564 || is_too_expensive (_ ("jump bypassing disabled")))
6565 return 0;
6567 gcc_obstack_init (&gcse_obstack);
6568 bytes_used = 0;
6570 /* We need alias. */
6571 init_alias_analysis ();
6573 /* Record where pseudo-registers are set. This data is kept accurate
6574 during each pass. ??? We could also record hard-reg information here
6575 [since it's unchanging], however it is currently done during hash table
6576 computation.
6578 It may be tempting to compute MEM set information here too, but MEM sets
6579 will be subject to code motion one day and thus we need to compute
6580 information about memory sets when we build the hash tables. */
6582 alloc_reg_set_mem (max_gcse_regno);
6583 compute_sets ();
6585 max_gcse_regno = max_reg_num ();
6586 alloc_gcse_mem ();
6587 changed = one_cprop_pass (MAX_GCSE_PASSES + 2, true, true);
6588 free_gcse_mem ();
6590 if (dump_file)
6592 fprintf (dump_file, "BYPASS of %s: %d basic blocks, ",
6593 current_function_name (), n_basic_blocks);
6594 fprintf (dump_file, "%d bytes\n\n", bytes_used);
6597 obstack_free (&gcse_obstack, NULL);
6598 free_reg_set_mem ();
6600 /* We are finished with alias. */
6601 end_alias_analysis ();
6603 return changed;
6606 /* Return true if the graph is too expensive to optimize. PASS is the
6607 optimization about to be performed. */
6609 static bool
6610 is_too_expensive (const char *pass)
6612 /* Trying to perform global optimizations on flow graphs which have
6613 a high connectivity will take a long time and is unlikely to be
6614 particularly useful.
6616 In normal circumstances a cfg should have about twice as many
6617 edges as blocks. But we do not want to punish small functions
6618 which have a couple of switch statements. Rather than simply
6619 thresholding the number of blocks, use something with more
6620 graceful degradation. */
6621 if (n_edges > 20000 + n_basic_blocks * 4)
6623 warning (OPT_Wdisabled_optimization,
6624 "%s: %d basic blocks and %d edges/basic block",
6625 pass, n_basic_blocks, n_edges / n_basic_blocks);
6627 return true;
6630 /* If allocating memory for the cprop bitmap would take up too much
6631 storage it's better just to disable the optimization. */
6632 if ((n_basic_blocks
6633 * SBITMAP_SET_SIZE (max_reg_num ())
6634 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
6636 warning (OPT_Wdisabled_optimization,
6637 "%s: %d basic blocks and %d registers",
6638 pass, n_basic_blocks, max_reg_num ());
6640 return true;
6643 return false;
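/* A worked example of the thresholds above (the numbers are purely
   illustrative): with n_basic_blocks = 1000, the connectivity test
   only fires beyond 20000 + 4 * 1000 = 24000 edges, i.e. an average of
   24 edges per block, far above the roughly 2 expected for normal
   code.  The second test instead bounds the cprop bitmap size,
   n_basic_blocks * SBITMAP_SET_SIZE (max_reg_num ())
   * sizeof (SBITMAP_ELT_TYPE), by MAX_GCSE_MEMORY.  */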
6646 static bool
6647 gate_handle_jump_bypass (void)
6649 return optimize > 0 && flag_gcse;
6652 /* Perform jump bypassing and control flow optimizations. */
6653 static unsigned int
6654 rest_of_handle_jump_bypass (void)
6656 delete_unreachable_blocks ();
6657 if (bypass_jumps ())
6659 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6660 rebuild_jump_labels (get_insns ());
6661 cleanup_cfg (0);
6663 return 0;
6666 struct tree_opt_pass pass_jump_bypass =
6668 "bypass", /* name */
6669 gate_handle_jump_bypass, /* gate */
6670 rest_of_handle_jump_bypass, /* execute */
6671 NULL, /* sub */
6672 NULL, /* next */
6673 0, /* static_pass_number */
6674 TV_BYPASS, /* tv_id */
6675 0, /* properties_required */
6676 0, /* properties_provided */
6677 0, /* properties_destroyed */
6678 0, /* todo_flags_start */
6679 TODO_dump_func |
6680 TODO_ggc_collect | TODO_verify_flow, /* todo_flags_finish */
6681 'G' /* letter */
6685 static bool
6686 gate_handle_gcse (void)
6688 return optimize > 0 && flag_gcse;
6692 static unsigned int
6693 rest_of_handle_gcse (void)
6695 int save_csb, save_cfj;
6696 int tem2 = 0, tem;
6697 tem = gcse_main (get_insns ());
6698 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6699 rebuild_jump_labels (get_insns ());
6700 save_csb = flag_cse_skip_blocks;
6701 save_cfj = flag_cse_follow_jumps;
6702 flag_cse_skip_blocks = flag_cse_follow_jumps = 0;
6704 /* If -fexpensive-optimizations, re-run CSE to clean up things done
6705 by gcse. */
6706 if (flag_expensive_optimizations)
6708 timevar_push (TV_CSE);
6709 tem2 = cse_main (get_insns (), max_reg_num ());
6710 df_finish_pass ();
6711 purge_all_dead_edges ();
6712 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6713 timevar_pop (TV_CSE);
6714 cse_not_expected = !flag_rerun_cse_after_loop;
6717 /* If gcse or cse altered any jumps, rerun jump optimizations to clean
6718 things up. */
6719 if (tem || tem2)
6721 timevar_push (TV_JUMP);
6722 rebuild_jump_labels (get_insns ());
6723 cleanup_cfg (0);
6724 timevar_pop (TV_JUMP);
6727 flag_cse_skip_blocks = save_csb;
6728 flag_cse_follow_jumps = save_cfj;
6729 return 0;
6732 struct tree_opt_pass pass_gcse =
6734 "gcse1", /* name */
6735 gate_handle_gcse, /* gate */
6736 rest_of_handle_gcse, /* execute */
6737 NULL, /* sub */
6738 NULL, /* next */
6739 0, /* static_pass_number */
6740 TV_GCSE, /* tv_id */
6741 0, /* properties_required */
6742 0, /* properties_provided */
6743 0, /* properties_destroyed */
6744 0, /* todo_flags_start */
6745 TODO_df_finish |
6746 TODO_dump_func |
6747 TODO_verify_flow | TODO_ggc_collect, /* todo_flags_finish */
6748 'G' /* letter */
6752 #include "gt-gcse.h"